1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2023 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
29 #include "insn-config.h"
34 #include "sparseset.h"
37 static ira_copy_t
find_allocno_copy (ira_allocno_t
, ira_allocno_t
, rtx_insn
*,
38 ira_loop_tree_node_t
);
40 /* The root of the loop tree corresponding to the all function. */
41 ira_loop_tree_node_t ira_loop_tree_root
;
43 /* Height of the loop tree. */
44 int ira_loop_tree_height
;
46 /* All nodes representing basic blocks are referred through the
47 following array. We cannot use basic block member `aux' for this
48 because it is used for insertion of insns on edges. */
49 ira_loop_tree_node_t ira_bb_nodes
;
51 /* All nodes representing loops are referred through the following
53 ira_loop_tree_node_t ira_loop_nodes
;
55 /* And size of the ira_loop_nodes array. */
56 unsigned int ira_loop_nodes_count
;
58 /* Map regno -> allocnos with given regno (see comments for
59 allocno member `next_regno_allocno'). */
60 ira_allocno_t
*ira_regno_allocno_map
;
62 /* Array of references to all allocnos. The order number of the
63 allocno corresponds to the index in the array. Removed allocnos
64 have NULL element value. */
65 ira_allocno_t
*ira_allocnos
;
67 /* Sizes of the previous array. */
70 /* Count of conflict record structures we've created, used when creating
74 /* Map a conflict id to its conflict record. */
75 ira_object_t
*ira_object_id_map
;
77 /* Array of references to all allocno preferences. The order number
78 of the preference corresponds to the index in the array. */
79 ira_pref_t
*ira_prefs
;
81 /* Size of the previous array. */
84 /* Array of references to all copies. The order number of the copy
85 corresponds to the index in the array. Removed copies have NULL
87 ira_copy_t
*ira_copies
;
89 /* Size of the previous array. */
94 /* LAST_BASIC_BLOCK before generating additional insns because of live
95 range splitting. Emitting insns on a critical edge creates a new
97 static int last_basic_block_before_change
;
99 /* Initialize some members in loop tree node NODE. Use LOOP_NUM for
100 the member loop_num. */
102 init_loop_tree_node (struct ira_loop_tree_node
*node
, int loop_num
)
104 int max_regno
= max_reg_num ();
106 node
->regno_allocno_map
107 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
) * max_regno
);
108 memset (node
->regno_allocno_map
, 0, sizeof (ira_allocno_t
) * max_regno
);
109 memset (node
->reg_pressure
, 0, sizeof (node
->reg_pressure
));
110 node
->all_allocnos
= ira_allocate_bitmap ();
111 node
->modified_regnos
= ira_allocate_bitmap ();
112 node
->border_allocnos
= ira_allocate_bitmap ();
113 node
->local_copies
= ira_allocate_bitmap ();
114 node
->loop_num
= loop_num
;
115 node
->children
= NULL
;
116 node
->subloops
= NULL
;
120 /* The following function allocates the loop tree nodes. If
121 CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
122 the root which corresponds the all function) will be not allocated
123 but nodes will still be allocated for basic blocks. */
125 create_loop_tree_nodes (void)
134 = ((struct ira_loop_tree_node
*)
135 ira_allocate (sizeof (struct ira_loop_tree_node
)
136 * last_basic_block_for_fn (cfun
)));
137 last_basic_block_before_change
= last_basic_block_for_fn (cfun
);
138 for (i
= 0; i
< (unsigned int) last_basic_block_for_fn (cfun
); i
++)
140 ira_bb_nodes
[i
].regno_allocno_map
= NULL
;
141 memset (ira_bb_nodes
[i
].reg_pressure
, 0,
142 sizeof (ira_bb_nodes
[i
].reg_pressure
));
143 ira_bb_nodes
[i
].all_allocnos
= NULL
;
144 ira_bb_nodes
[i
].modified_regnos
= NULL
;
145 ira_bb_nodes
[i
].border_allocnos
= NULL
;
146 ira_bb_nodes
[i
].local_copies
= NULL
;
148 if (current_loops
== NULL
)
150 ira_loop_nodes_count
= 1;
151 ira_loop_nodes
= ((struct ira_loop_tree_node
*)
152 ira_allocate (sizeof (struct ira_loop_tree_node
)));
153 init_loop_tree_node (ira_loop_nodes
, 0);
156 ira_loop_nodes_count
= number_of_loops (cfun
);
157 ira_loop_nodes
= ((struct ira_loop_tree_node
*)
158 ira_allocate (sizeof (struct ira_loop_tree_node
)
159 * ira_loop_nodes_count
));
160 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), i
, loop
)
162 if (loop_outer (loop
) != NULL
)
164 ira_loop_nodes
[i
].regno_allocno_map
= NULL
;
166 FOR_EACH_EDGE (e
, ei
, loop
->header
->preds
)
167 if (e
->src
!= loop
->latch
168 && (e
->flags
& EDGE_ABNORMAL
) && EDGE_CRITICAL_P (e
))
175 auto_vec
<edge
> edges
= get_loop_exit_edges (loop
);
176 FOR_EACH_VEC_ELT (edges
, j
, e
)
177 if ((e
->flags
& EDGE_ABNORMAL
) && EDGE_CRITICAL_P (e
))
185 init_loop_tree_node (&ira_loop_nodes
[i
], loop
->num
);
189 /* The function returns TRUE if there are more one allocation
192 more_one_region_p (void)
197 if (current_loops
!= NULL
)
198 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), i
, loop
)
199 if (ira_loop_nodes
[i
].regno_allocno_map
!= NULL
200 && ira_loop_tree_root
!= &ira_loop_nodes
[i
])
205 /* Free the loop tree node of a loop. */
207 finish_loop_tree_node (ira_loop_tree_node_t loop
)
209 if (loop
->regno_allocno_map
!= NULL
)
211 ira_assert (loop
->bb
== NULL
);
212 ira_free_bitmap (loop
->local_copies
);
213 ira_free_bitmap (loop
->border_allocnos
);
214 ira_free_bitmap (loop
->modified_regnos
);
215 ira_free_bitmap (loop
->all_allocnos
);
216 ira_free (loop
->regno_allocno_map
);
217 loop
->regno_allocno_map
= NULL
;
221 /* Free the loop tree nodes. */
223 finish_loop_tree_nodes (void)
227 for (i
= 0; i
< ira_loop_nodes_count
; i
++)
228 finish_loop_tree_node (&ira_loop_nodes
[i
]);
229 ira_free (ira_loop_nodes
);
230 for (i
= 0; i
< (unsigned int) last_basic_block_before_change
; i
++)
232 if (ira_bb_nodes
[i
].local_copies
!= NULL
)
233 ira_free_bitmap (ira_bb_nodes
[i
].local_copies
);
234 if (ira_bb_nodes
[i
].border_allocnos
!= NULL
)
235 ira_free_bitmap (ira_bb_nodes
[i
].border_allocnos
);
236 if (ira_bb_nodes
[i
].modified_regnos
!= NULL
)
237 ira_free_bitmap (ira_bb_nodes
[i
].modified_regnos
);
238 if (ira_bb_nodes
[i
].all_allocnos
!= NULL
)
239 ira_free_bitmap (ira_bb_nodes
[i
].all_allocnos
);
240 if (ira_bb_nodes
[i
].regno_allocno_map
!= NULL
)
241 ira_free (ira_bb_nodes
[i
].regno_allocno_map
);
243 ira_free (ira_bb_nodes
);
248 /* The following recursive function adds LOOP to the loop tree
249 hierarchy. LOOP is added only once. If LOOP is NULL we adding
250 loop designating the whole function when CFG loops are not
253 add_loop_to_tree (class loop
*loop
)
257 ira_loop_tree_node_t loop_node
, parent_node
;
259 /* We cannot use loop node access macros here because of potential
260 checking and because the nodes are not initialized enough
262 if (loop
!= NULL
&& loop_outer (loop
) != NULL
)
263 add_loop_to_tree (loop_outer (loop
));
264 loop_num
= loop
!= NULL
? loop
->num
: 0;
265 if (ira_loop_nodes
[loop_num
].regno_allocno_map
!= NULL
266 && ira_loop_nodes
[loop_num
].children
== NULL
)
268 /* We have not added loop node to the tree yet. */
269 loop_node
= &ira_loop_nodes
[loop_num
];
270 loop_node
->loop
= loop
;
271 loop_node
->bb
= NULL
;
276 for (parent
= loop_outer (loop
);
278 parent
= loop_outer (parent
))
279 if (ira_loop_nodes
[parent
->num
].regno_allocno_map
!= NULL
)
284 loop_node
->next
= NULL
;
285 loop_node
->subloop_next
= NULL
;
286 loop_node
->parent
= NULL
;
290 parent_node
= &ira_loop_nodes
[parent
->num
];
291 loop_node
->next
= parent_node
->children
;
292 parent_node
->children
= loop_node
;
293 loop_node
->subloop_next
= parent_node
->subloops
;
294 parent_node
->subloops
= loop_node
;
295 loop_node
->parent
= parent_node
;
300 /* The following recursive function sets up levels of nodes of the
301 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
302 The function returns maximal value of level in the tree + 1. */
304 setup_loop_tree_level (ira_loop_tree_node_t loop_node
, int level
)
306 int height
, max_height
;
307 ira_loop_tree_node_t subloop_node
;
309 ira_assert (loop_node
->bb
== NULL
);
310 loop_node
->level
= level
;
311 max_height
= level
+ 1;
312 for (subloop_node
= loop_node
->subloops
;
313 subloop_node
!= NULL
;
314 subloop_node
= subloop_node
->subloop_next
)
316 ira_assert (subloop_node
->bb
== NULL
);
317 height
= setup_loop_tree_level (subloop_node
, level
+ 1);
318 if (height
> max_height
)
324 /* Create the loop tree. The algorithm is designed to provide correct
325 order of loops (they are ordered by their last loop BB) and basic
326 blocks in the chain formed by member next. */
328 form_loop_tree (void)
332 ira_loop_tree_node_t bb_node
, loop_node
;
334 /* We cannot use loop/bb node access macros because of potential
335 checking and because the nodes are not initialized enough
337 FOR_EACH_BB_FN (bb
, cfun
)
339 bb_node
= &ira_bb_nodes
[bb
->index
];
341 bb_node
->loop
= NULL
;
342 bb_node
->subloops
= NULL
;
343 bb_node
->children
= NULL
;
344 bb_node
->subloop_next
= NULL
;
345 bb_node
->next
= NULL
;
346 if (current_loops
== NULL
)
350 for (parent
= bb
->loop_father
;
352 parent
= loop_outer (parent
))
353 if (ira_loop_nodes
[parent
->num
].regno_allocno_map
!= NULL
)
356 add_loop_to_tree (parent
);
357 loop_node
= &ira_loop_nodes
[parent
== NULL
? 0 : parent
->num
];
358 bb_node
->next
= loop_node
->children
;
359 bb_node
->parent
= loop_node
;
360 loop_node
->children
= bb_node
;
362 ira_loop_tree_root
= IRA_LOOP_NODE_BY_INDEX (0);
363 ira_loop_tree_height
= setup_loop_tree_level (ira_loop_tree_root
, 0);
364 ira_assert (ira_loop_tree_root
->regno_allocno_map
!= NULL
);
369 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
372 rebuild_regno_allocno_maps (void)
375 int max_regno
, regno
;
377 ira_loop_tree_node_t loop_tree_node
;
379 ira_allocno_iterator ai
;
381 ira_assert (current_loops
!= NULL
);
382 max_regno
= max_reg_num ();
383 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), l
, loop
)
384 if (ira_loop_nodes
[l
].regno_allocno_map
!= NULL
)
386 ira_free (ira_loop_nodes
[l
].regno_allocno_map
);
387 ira_loop_nodes
[l
].regno_allocno_map
388 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
390 memset (ira_loop_nodes
[l
].regno_allocno_map
, 0,
391 sizeof (ira_allocno_t
) * max_regno
);
393 ira_free (ira_regno_allocno_map
);
394 ira_regno_allocno_map
395 = (ira_allocno_t
*) ira_allocate (max_regno
* sizeof (ira_allocno_t
));
396 memset (ira_regno_allocno_map
, 0, max_regno
* sizeof (ira_allocno_t
));
397 FOR_EACH_ALLOCNO (a
, ai
)
399 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
400 /* Caps are not in the regno allocno maps. */
402 regno
= ALLOCNO_REGNO (a
);
403 loop_tree_node
= ALLOCNO_LOOP_TREE_NODE (a
);
404 ALLOCNO_NEXT_REGNO_ALLOCNO (a
) = ira_regno_allocno_map
[regno
];
405 ira_regno_allocno_map
[regno
] = a
;
406 if (loop_tree_node
->regno_allocno_map
[regno
] == NULL
)
407 /* Remember that we can create temporary allocnos to break
408 cycles in register shuffle. */
409 loop_tree_node
->regno_allocno_map
[regno
] = a
;
/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;
427 /* Initialize data concerning allocnos. */
429 initiate_allocnos (void)
431 allocno_vec
.create (max_reg_num () * 2);
433 ira_allocnos_num
= 0;
435 ira_object_id_map_vec
.create (max_reg_num () * 2);
436 ira_object_id_map
= NULL
;
437 ira_regno_allocno_map
438 = (ira_allocno_t
*) ira_allocate (max_reg_num ()
439 * sizeof (ira_allocno_t
));
440 memset (ira_regno_allocno_map
, 0, max_reg_num () * sizeof (ira_allocno_t
));
443 /* Create and return an object corresponding to a new allocno A. */
445 ira_create_object (ira_allocno_t a
, int subword
)
447 enum reg_class aclass
= ALLOCNO_CLASS (a
);
448 ira_object_t obj
= object_pool
.allocate ();
450 OBJECT_ALLOCNO (obj
) = a
;
451 OBJECT_SUBWORD (obj
) = subword
;
452 OBJECT_CONFLICT_ID (obj
) = ira_objects_num
;
453 OBJECT_CONFLICT_VEC_P (obj
) = false;
454 OBJECT_CONFLICT_ARRAY (obj
) = NULL
;
455 OBJECT_NUM_CONFLICTS (obj
) = 0;
456 OBJECT_CONFLICT_HARD_REGS (obj
) = ira_no_alloc_regs
;
457 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
) = ira_no_alloc_regs
;
458 OBJECT_CONFLICT_HARD_REGS (obj
) |= ~reg_class_contents
[aclass
];
459 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
) |= ~reg_class_contents
[aclass
];
460 OBJECT_MIN (obj
) = INT_MAX
;
461 OBJECT_MAX (obj
) = -1;
462 OBJECT_LIVE_RANGES (obj
) = NULL
;
464 ira_object_id_map_vec
.safe_push (obj
);
466 = ira_object_id_map_vec
.address ();
467 ira_objects_num
= ira_object_id_map_vec
.length ();
472 /* Create and return the allocno corresponding to REGNO in
473 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
474 same regno if CAP_P is FALSE. */
476 ira_create_allocno (int regno
, bool cap_p
,
477 ira_loop_tree_node_t loop_tree_node
)
481 a
= allocno_pool
.allocate ();
482 ALLOCNO_REGNO (a
) = regno
;
483 ALLOCNO_LOOP_TREE_NODE (a
) = loop_tree_node
;
486 ALLOCNO_NEXT_REGNO_ALLOCNO (a
) = ira_regno_allocno_map
[regno
];
487 ira_regno_allocno_map
[regno
] = a
;
488 if (loop_tree_node
->regno_allocno_map
[regno
] == NULL
)
489 /* Remember that we can create temporary allocnos to break
490 cycles in register shuffle on region borders (see
492 loop_tree_node
->regno_allocno_map
[regno
] = a
;
494 ALLOCNO_CAP (a
) = NULL
;
495 ALLOCNO_CAP_MEMBER (a
) = NULL
;
496 ALLOCNO_NUM (a
) = ira_allocnos_num
;
497 bitmap_set_bit (loop_tree_node
->all_allocnos
, ALLOCNO_NUM (a
));
498 ALLOCNO_NREFS (a
) = 0;
499 ALLOCNO_FREQ (a
) = 0;
500 ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (a
) = false;
501 ALLOCNO_HARD_REGNO (a
) = -1;
502 ALLOCNO_CALL_FREQ (a
) = 0;
503 ALLOCNO_CALLS_CROSSED_NUM (a
) = 0;
504 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
) = 0;
505 ALLOCNO_CROSSED_CALLS_ABIS (a
) = 0;
506 CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
));
508 ALLOCNO_NO_STACK_REG_P (a
) = false;
509 ALLOCNO_TOTAL_NO_STACK_REG_P (a
) = false;
511 ALLOCNO_DONT_REASSIGN_P (a
) = false;
512 ALLOCNO_BAD_SPILL_P (a
) = false;
513 ALLOCNO_ASSIGNED_P (a
) = false;
514 ALLOCNO_MODE (a
) = (regno
< 0 ? VOIDmode
: PSEUDO_REGNO_MODE (regno
));
515 ALLOCNO_WMODE (a
) = ALLOCNO_MODE (a
);
516 ALLOCNO_PREFS (a
) = NULL
;
517 ALLOCNO_COPIES (a
) = NULL
;
518 ALLOCNO_HARD_REG_COSTS (a
) = NULL
;
519 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
520 ALLOCNO_UPDATED_HARD_REG_COSTS (a
) = NULL
;
521 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
522 ALLOCNO_CLASS (a
) = NO_REGS
;
523 ALLOCNO_UPDATED_CLASS_COST (a
) = 0;
524 ALLOCNO_CLASS_COST (a
) = 0;
525 ALLOCNO_MEMORY_COST (a
) = 0;
526 ALLOCNO_UPDATED_MEMORY_COST (a
) = 0;
527 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
) = 0;
528 ALLOCNO_NUM_OBJECTS (a
) = 0;
530 ALLOCNO_ADD_DATA (a
) = NULL
;
531 allocno_vec
.safe_push (a
);
532 ira_allocnos
= allocno_vec
.address ();
533 ira_allocnos_num
= allocno_vec
.length ();
538 /* Set up register class for A and update its conflict hard
541 ira_set_allocno_class (ira_allocno_t a
, enum reg_class aclass
)
543 ira_allocno_object_iterator oi
;
546 ALLOCNO_CLASS (a
) = aclass
;
547 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
549 OBJECT_CONFLICT_HARD_REGS (obj
) |= ~reg_class_contents
[aclass
];
550 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
) |= ~reg_class_contents
[aclass
];
554 /* Determine the number of objects we should associate with allocno A
555 and allocate them. */
557 ira_create_allocno_objects (ira_allocno_t a
)
559 machine_mode mode
= ALLOCNO_MODE (a
);
560 enum reg_class aclass
= ALLOCNO_CLASS (a
);
561 int n
= ira_reg_class_max_nregs
[aclass
][mode
];
564 if (n
!= 2 || maybe_ne (GET_MODE_SIZE (mode
), n
* UNITS_PER_WORD
))
567 ALLOCNO_NUM_OBJECTS (a
) = n
;
568 for (i
= 0; i
< n
; i
++)
569 ALLOCNO_OBJECT (a
, i
) = ira_create_object (a
, i
);
572 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
573 ALLOCNO_OBJECT structures. This must be called after the allocno
574 classes are known. */
576 create_allocno_objects (void)
579 ira_allocno_iterator ai
;
581 FOR_EACH_ALLOCNO (a
, ai
)
582 ira_create_allocno_objects (a
);
585 /* Merge hard register conflict information for all objects associated with
586 allocno TO into the corresponding objects associated with FROM.
587 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
589 merge_hard_reg_conflicts (ira_allocno_t from
, ira_allocno_t to
,
593 gcc_assert (ALLOCNO_NUM_OBJECTS (to
) == ALLOCNO_NUM_OBJECTS (from
));
594 for (i
= 0; i
< ALLOCNO_NUM_OBJECTS (to
); i
++)
596 ira_object_t from_obj
= ALLOCNO_OBJECT (from
, i
);
597 ira_object_t to_obj
= ALLOCNO_OBJECT (to
, i
);
600 OBJECT_CONFLICT_HARD_REGS (to_obj
)
601 |= OBJECT_CONFLICT_HARD_REGS (from_obj
);
602 OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj
)
603 |= OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj
);
606 if (!total_only
&& ALLOCNO_NO_STACK_REG_P (from
))
607 ALLOCNO_NO_STACK_REG_P (to
) = true;
608 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from
))
609 ALLOCNO_TOTAL_NO_STACK_REG_P (to
) = true;
613 /* Update hard register conflict information for all objects associated with
614 A to include the regs in SET. */
616 ior_hard_reg_conflicts (ira_allocno_t a
, const_hard_reg_set set
)
618 ira_allocno_object_iterator i
;
621 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, i
)
623 OBJECT_CONFLICT_HARD_REGS (obj
) |= set
;
624 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
) |= set
;
628 /* Return TRUE if a conflict vector with NUM elements is more
629 profitable than a conflict bit vector for OBJ. */
631 ira_conflict_vector_profitable_p (ira_object_t obj
, int num
)
634 int max
= OBJECT_MAX (obj
);
635 int min
= OBJECT_MIN (obj
);
638 /* We prefer a bit vector in such case because it does not result
642 nbytes
= (max
- min
) / 8 + 1;
643 STATIC_ASSERT (sizeof (ira_object_t
) <= 8);
644 /* Don't use sizeof (ira_object_t), use constant 8. Size of ira_object_t (a
645 pointer) is different on 32-bit and 64-bit targets. Usage sizeof
646 (ira_object_t) can result in different code generation by GCC built as 32-
647 and 64-bit program. In any case the profitability is just an estimation
648 and border cases are rare. */
649 return (2 * 8 /* sizeof (ira_object_t) */ * (num
+ 1) < 3 * nbytes
);
652 /* Allocates and initialize the conflict vector of OBJ for NUM
653 conflicting objects. */
655 ira_allocate_conflict_vec (ira_object_t obj
, int num
)
660 ira_assert (OBJECT_CONFLICT_ARRAY (obj
) == NULL
);
661 num
++; /* for NULL end marker */
662 size
= sizeof (ira_object_t
) * num
;
663 OBJECT_CONFLICT_ARRAY (obj
) = ira_allocate (size
);
664 vec
= (ira_object_t
*) OBJECT_CONFLICT_ARRAY (obj
);
666 OBJECT_NUM_CONFLICTS (obj
) = 0;
667 OBJECT_CONFLICT_ARRAY_SIZE (obj
) = size
;
668 OBJECT_CONFLICT_VEC_P (obj
) = true;
671 /* Allocate and initialize the conflict bit vector of OBJ. */
673 allocate_conflict_bit_vec (ira_object_t obj
)
677 ira_assert (OBJECT_CONFLICT_ARRAY (obj
) == NULL
);
678 size
= ((OBJECT_MAX (obj
) - OBJECT_MIN (obj
) + IRA_INT_BITS
)
679 / IRA_INT_BITS
* sizeof (IRA_INT_TYPE
));
680 OBJECT_CONFLICT_ARRAY (obj
) = ira_allocate (size
);
681 memset (OBJECT_CONFLICT_ARRAY (obj
), 0, size
);
682 OBJECT_CONFLICT_ARRAY_SIZE (obj
) = size
;
683 OBJECT_CONFLICT_VEC_P (obj
) = false;
686 /* Allocate and initialize the conflict vector or conflict bit vector
687 of OBJ for NUM conflicting allocnos whatever is more profitable. */
689 ira_allocate_object_conflicts (ira_object_t obj
, int num
)
691 if (ira_conflict_vector_profitable_p (obj
, num
))
692 ira_allocate_conflict_vec (obj
, num
);
694 allocate_conflict_bit_vec (obj
);
697 /* Add OBJ2 to the conflicts of OBJ1. */
699 add_to_conflicts (ira_object_t obj1
, ira_object_t obj2
)
704 if (OBJECT_CONFLICT_VEC_P (obj1
))
706 ira_object_t
*vec
= OBJECT_CONFLICT_VEC (obj1
);
707 int curr_num
= OBJECT_NUM_CONFLICTS (obj1
);
709 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1
) < num
* sizeof (ira_object_t
))
711 ira_object_t
*newvec
;
712 size
= (3 * num
/ 2 + 1) * sizeof (ira_allocno_t
);
713 newvec
= (ira_object_t
*) ira_allocate (size
);
714 memcpy (newvec
, vec
, curr_num
* sizeof (ira_object_t
));
717 OBJECT_CONFLICT_ARRAY (obj1
) = vec
;
718 OBJECT_CONFLICT_ARRAY_SIZE (obj1
) = size
;
722 OBJECT_NUM_CONFLICTS (obj1
)++;
726 int nw
, added_head_nw
, id
;
727 IRA_INT_TYPE
*vec
= OBJECT_CONFLICT_BITVEC (obj1
);
729 id
= OBJECT_CONFLICT_ID (obj2
);
730 if (OBJECT_MIN (obj1
) > id
)
732 /* Expand head of the bit vector. */
733 added_head_nw
= (OBJECT_MIN (obj1
) - id
- 1) / IRA_INT_BITS
+ 1;
734 nw
= (OBJECT_MAX (obj1
) - OBJECT_MIN (obj1
)) / IRA_INT_BITS
+ 1;
735 size
= (nw
+ added_head_nw
) * sizeof (IRA_INT_TYPE
);
736 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1
) >= size
)
738 memmove ((char *) vec
+ added_head_nw
* sizeof (IRA_INT_TYPE
),
739 vec
, nw
* sizeof (IRA_INT_TYPE
));
740 memset (vec
, 0, added_head_nw
* sizeof (IRA_INT_TYPE
));
745 = (3 * (nw
+ added_head_nw
) / 2 + 1) * sizeof (IRA_INT_TYPE
);
746 vec
= (IRA_INT_TYPE
*) ira_allocate (size
);
747 memcpy ((char *) vec
+ added_head_nw
* sizeof (IRA_INT_TYPE
),
748 OBJECT_CONFLICT_ARRAY (obj1
), nw
* sizeof (IRA_INT_TYPE
));
749 memset (vec
, 0, added_head_nw
* sizeof (IRA_INT_TYPE
));
751 + (nw
+ added_head_nw
) * sizeof (IRA_INT_TYPE
),
752 0, size
- (nw
+ added_head_nw
) * sizeof (IRA_INT_TYPE
));
753 ira_free (OBJECT_CONFLICT_ARRAY (obj1
));
754 OBJECT_CONFLICT_ARRAY (obj1
) = vec
;
755 OBJECT_CONFLICT_ARRAY_SIZE (obj1
) = size
;
757 OBJECT_MIN (obj1
) -= added_head_nw
* IRA_INT_BITS
;
759 else if (OBJECT_MAX (obj1
) < id
)
761 nw
= (id
- OBJECT_MIN (obj1
)) / IRA_INT_BITS
+ 1;
762 size
= nw
* sizeof (IRA_INT_TYPE
);
763 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1
) < size
)
765 /* Expand tail of the bit vector. */
766 size
= (3 * nw
/ 2 + 1) * sizeof (IRA_INT_TYPE
);
767 vec
= (IRA_INT_TYPE
*) ira_allocate (size
);
768 memcpy (vec
, OBJECT_CONFLICT_ARRAY (obj1
), OBJECT_CONFLICT_ARRAY_SIZE (obj1
));
769 memset ((char *) vec
+ OBJECT_CONFLICT_ARRAY_SIZE (obj1
),
770 0, size
- OBJECT_CONFLICT_ARRAY_SIZE (obj1
));
771 ira_free (OBJECT_CONFLICT_ARRAY (obj1
));
772 OBJECT_CONFLICT_ARRAY (obj1
) = vec
;
773 OBJECT_CONFLICT_ARRAY_SIZE (obj1
) = size
;
775 OBJECT_MAX (obj1
) = id
;
777 SET_MINMAX_SET_BIT (vec
, id
, OBJECT_MIN (obj1
), OBJECT_MAX (obj1
));
781 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
783 ira_add_conflict (ira_object_t obj1
, ira_object_t obj2
)
785 add_to_conflicts (obj1
, obj2
);
786 add_to_conflicts (obj2
, obj1
);
789 /* Clear all conflicts of OBJ. */
791 clear_conflicts (ira_object_t obj
)
793 if (OBJECT_CONFLICT_VEC_P (obj
))
795 OBJECT_NUM_CONFLICTS (obj
) = 0;
796 OBJECT_CONFLICT_VEC (obj
)[0] = NULL
;
798 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj
) != 0)
802 nw
= (OBJECT_MAX (obj
) - OBJECT_MIN (obj
)) / IRA_INT_BITS
+ 1;
803 memset (OBJECT_CONFLICT_BITVEC (obj
), 0, nw
* sizeof (IRA_INT_TYPE
));
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
815 /* Remove duplications in conflict vector of OBJ. */
817 compress_conflict_vec (ira_object_t obj
)
819 ira_object_t
*vec
, conflict_obj
;
822 ira_assert (OBJECT_CONFLICT_VEC_P (obj
));
823 vec
= OBJECT_CONFLICT_VEC (obj
);
824 curr_conflict_check_tick
++;
825 for (i
= j
= 0; (conflict_obj
= vec
[i
]) != NULL
; i
++)
827 int id
= OBJECT_CONFLICT_ID (conflict_obj
);
828 if (conflict_check
[id
] != curr_conflict_check_tick
)
830 conflict_check
[id
] = curr_conflict_check_tick
;
831 vec
[j
++] = conflict_obj
;
834 OBJECT_NUM_CONFLICTS (obj
) = j
;
838 /* Remove duplications in conflict vectors of all allocnos. */
840 compress_conflict_vecs (void)
843 ira_object_iterator oi
;
845 conflict_check
= (int *) ira_allocate (sizeof (int) * ira_objects_num
);
846 memset (conflict_check
, 0, sizeof (int) * ira_objects_num
);
847 curr_conflict_check_tick
= 0;
848 FOR_EACH_OBJECT (obj
, oi
)
850 if (OBJECT_CONFLICT_VEC_P (obj
))
851 compress_conflict_vec (obj
);
853 ira_free (conflict_check
);
856 /* This recursive function outputs allocno A and if it is a cap the
857 function outputs its members. */
859 ira_print_expanded_allocno (ira_allocno_t a
)
863 fprintf (ira_dump_file
, " a%d(r%d", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
864 if ((bb
= ALLOCNO_LOOP_TREE_NODE (a
)->bb
) != NULL
)
865 fprintf (ira_dump_file
, ",b%d", bb
->index
);
867 fprintf (ira_dump_file
, ",l%d", ALLOCNO_LOOP_TREE_NODE (a
)->loop_num
);
868 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
870 fprintf (ira_dump_file
, ":");
871 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a
));
873 fprintf (ira_dump_file
, ")");
876 /* Create and return the cap representing allocno A in the
879 create_cap_allocno (ira_allocno_t a
)
882 ira_loop_tree_node_t parent
;
883 enum reg_class aclass
;
885 parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
;
886 cap
= ira_create_allocno (ALLOCNO_REGNO (a
), true, parent
);
887 ALLOCNO_MODE (cap
) = ALLOCNO_MODE (a
);
888 ALLOCNO_WMODE (cap
) = ALLOCNO_WMODE (a
);
889 aclass
= ALLOCNO_CLASS (a
);
890 ira_set_allocno_class (cap
, aclass
);
891 ira_create_allocno_objects (cap
);
892 ALLOCNO_CAP_MEMBER (cap
) = a
;
893 ALLOCNO_CAP (a
) = cap
;
894 ALLOCNO_CLASS_COST (cap
) = ALLOCNO_CLASS_COST (a
);
895 ALLOCNO_MEMORY_COST (cap
) = ALLOCNO_MEMORY_COST (a
);
896 ira_allocate_and_copy_costs
897 (&ALLOCNO_HARD_REG_COSTS (cap
), aclass
, ALLOCNO_HARD_REG_COSTS (a
));
898 ira_allocate_and_copy_costs
899 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap
), aclass
,
900 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
));
901 ALLOCNO_BAD_SPILL_P (cap
) = ALLOCNO_BAD_SPILL_P (a
);
902 ALLOCNO_NREFS (cap
) = ALLOCNO_NREFS (a
);
903 ALLOCNO_FREQ (cap
) = ALLOCNO_FREQ (a
);
904 ALLOCNO_CALL_FREQ (cap
) = ALLOCNO_CALL_FREQ (a
);
906 merge_hard_reg_conflicts (a
, cap
, false);
908 ALLOCNO_CALLS_CROSSED_NUM (cap
) = ALLOCNO_CALLS_CROSSED_NUM (a
);
909 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap
) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
910 ALLOCNO_CROSSED_CALLS_ABIS (cap
) = ALLOCNO_CROSSED_CALLS_ABIS (a
);
911 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap
)
912 = ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
);
913 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
915 fprintf (ira_dump_file
, " Creating cap ");
916 ira_print_expanded_allocno (cap
);
917 fprintf (ira_dump_file
, "\n");
922 /* Create and return a live range for OBJECT with given attributes. */
924 ira_create_live_range (ira_object_t obj
, int start
, int finish
,
929 p
= live_range_pool
.allocate ();
937 /* Create a new live range for OBJECT and queue it at the head of its
940 ira_add_live_range_to_object (ira_object_t object
, int start
, int finish
)
943 p
= ira_create_live_range (object
, start
, finish
,
944 OBJECT_LIVE_RANGES (object
));
945 OBJECT_LIVE_RANGES (object
) = p
;
948 /* Copy allocno live range R and return the result. */
950 copy_live_range (live_range_t r
)
954 p
= live_range_pool
.allocate ();
959 /* Copy allocno live range list given by its head R and return the
962 ira_copy_live_range_list (live_range_t r
)
964 live_range_t p
, first
, last
;
968 for (first
= last
= NULL
; r
!= NULL
; r
= r
->next
)
970 p
= copy_live_range (r
);
980 /* Merge ranges R1 and R2 and returns the result. The function
981 maintains the order of ranges and tries to minimize number of the
984 ira_merge_live_ranges (live_range_t r1
, live_range_t r2
)
986 live_range_t first
, last
;
992 for (first
= last
= NULL
; r1
!= NULL
&& r2
!= NULL
;)
994 if (r1
->start
< r2
->start
)
996 if (r1
->start
<= r2
->finish
+ 1)
998 /* Intersected ranges: merge r1 and r2 into r1. */
999 r1
->start
= r2
->start
;
1000 if (r1
->finish
< r2
->finish
)
1001 r1
->finish
= r2
->finish
;
1002 live_range_t temp
= r2
;
1004 ira_finish_live_range (temp
);
1007 /* To try to merge with subsequent ranges in r1. */
1014 /* Add r1 to the result. */
1025 /* To try to merge with subsequent ranges in r2. */
1037 ira_assert (r1
->next
== NULL
);
1039 else if (r2
!= NULL
)
1045 ira_assert (r2
->next
== NULL
);
1049 ira_assert (last
->next
== NULL
);
1054 /* Return TRUE if live ranges R1 and R2 intersect. */
1056 ira_live_ranges_intersect_p (live_range_t r1
, live_range_t r2
)
1058 /* Remember the live ranges are always kept ordered. */
1059 while (r1
!= NULL
&& r2
!= NULL
)
1061 if (r1
->start
> r2
->finish
)
1063 else if (r2
->start
> r1
->finish
)
1071 /* Free allocno live range R. */
1073 ira_finish_live_range (live_range_t r
)
1075 live_range_pool
.remove (r
);
1078 /* Free list of allocno live ranges starting with R. */
1080 ira_finish_live_range_list (live_range_t r
)
1082 live_range_t next_r
;
1084 for (; r
!= NULL
; r
= next_r
)
1087 ira_finish_live_range (r
);
1091 /* Free updated register costs of allocno A. */
1093 ira_free_allocno_updated_costs (ira_allocno_t a
)
1095 enum reg_class aclass
;
1097 aclass
= ALLOCNO_CLASS (a
);
1098 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) != NULL
)
1099 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a
), aclass
);
1100 ALLOCNO_UPDATED_HARD_REG_COSTS (a
) = NULL
;
1101 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) != NULL
)
1102 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
),
1104 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
1107 /* Free and nullify all cost vectors allocated earlier for allocno
1110 ira_free_allocno_costs (ira_allocno_t a
)
1112 enum reg_class aclass
= ALLOCNO_CLASS (a
);
1114 ira_allocno_object_iterator oi
;
1116 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
1118 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj
));
1119 ira_object_id_map
[OBJECT_CONFLICT_ID (obj
)] = NULL
;
1120 if (OBJECT_CONFLICT_ARRAY (obj
) != NULL
)
1121 ira_free (OBJECT_CONFLICT_ARRAY (obj
));
1122 object_pool
.remove (obj
);
1125 ira_allocnos
[ALLOCNO_NUM (a
)] = NULL
;
1126 if (ALLOCNO_HARD_REG_COSTS (a
) != NULL
)
1127 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a
), aclass
);
1128 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) != NULL
)
1129 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
), aclass
);
1130 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) != NULL
)
1131 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a
), aclass
);
1132 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) != NULL
)
1133 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
),
1135 ALLOCNO_HARD_REG_COSTS (a
) = NULL
;
1136 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
1137 ALLOCNO_UPDATED_HARD_REG_COSTS (a
) = NULL
;
1138 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
1141 /* Free the memory allocated for allocno A. */
1143 finish_allocno (ira_allocno_t a
)
1145 ira_free_allocno_costs (a
);
1146 allocno_pool
.remove (a
);
1149 /* Free the memory allocated for all allocnos. */
1151 finish_allocnos (void)
1154 ira_allocno_iterator ai
;
1156 FOR_EACH_ALLOCNO (a
, ai
)
1158 ira_free (ira_regno_allocno_map
);
1159 ira_object_id_map_vec
.release ();
1160 allocno_vec
.release ();
1161 allocno_pool
.release ();
1162 object_pool
.release ();
1163 live_range_pool
.release ();
/* Pools for allocno preferences.  */
static object_allocator<ira_allocno_pref> pref_pool ("prefs");

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;
1175 /* The function initializes data concerning allocno prefs. */
1177 initiate_prefs (void)
1179 pref_vec
.create (get_max_uid ());
1184 /* Return pref for A and HARD_REGNO if any. */
1186 find_allocno_pref (ira_allocno_t a
, int hard_regno
)
1190 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= pref
->next_pref
)
1191 if (pref
->allocno
== a
&& pref
->hard_regno
== hard_regno
)
1196 /* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1198 ira_create_pref (ira_allocno_t a
, int hard_regno
, int freq
)
1202 pref
= pref_pool
.allocate ();
1203 pref
->num
= ira_prefs_num
;
1205 pref
->hard_regno
= hard_regno
;
1207 pref_vec
.safe_push (pref
);
1208 ira_prefs
= pref_vec
.address ();
1209 ira_prefs_num
= pref_vec
.length ();
1213 /* Attach a pref PREF to the corresponding allocno. */
1215 add_allocno_pref_to_list (ira_pref_t pref
)
1217 ira_allocno_t a
= pref
->allocno
;
1219 pref
->next_pref
= ALLOCNO_PREFS (a
);
1220 ALLOCNO_PREFS (a
) = pref
;
1223 /* Create (or update frequency if the pref already exists) the pref of
1224 allocnos A preferring HARD_REGNO with frequency FREQ. */
1226 ira_add_allocno_pref (ira_allocno_t a
, int hard_regno
, int freq
)
1232 if ((pref
= find_allocno_pref (a
, hard_regno
)) != NULL
)
1237 pref
= ira_create_pref (a
, hard_regno
, freq
);
1238 ira_assert (a
!= NULL
);
1239 add_allocno_pref_to_list (pref
);
1242 /* Print info about PREF into file F. */
1244 print_pref (FILE *f
, ira_pref_t pref
)
1246 fprintf (f
, " pref%d:a%d(r%d)<-hr%d@%d\n", pref
->num
,
1247 ALLOCNO_NUM (pref
->allocno
), ALLOCNO_REGNO (pref
->allocno
),
1248 pref
->hard_regno
, pref
->freq
);
1251 /* Print info about PREF into stderr. */
1253 ira_debug_pref (ira_pref_t pref
)
1255 print_pref (stderr
, pref
);
1258 /* Print info about all prefs into file F. */
1260 print_prefs (FILE *f
)
1263 ira_pref_iterator pi
;
1265 FOR_EACH_PREF (pref
, pi
)
1266 print_pref (f
, pref
);
1269 /* Print info about all prefs into stderr. */
1271 ira_debug_prefs (void)
1273 print_prefs (stderr
);
1276 /* Print info about prefs involving allocno A into file F. */
1278 print_allocno_prefs (FILE *f
, ira_allocno_t a
)
1282 fprintf (f
, " a%d(r%d):", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
1283 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= pref
->next_pref
)
1284 fprintf (f
, " pref%d:hr%d@%d", pref
->num
, pref
->hard_regno
, pref
->freq
);
1288 /* Print info about prefs involving allocno A into stderr. */
1290 ira_debug_allocno_prefs (ira_allocno_t a
)
1292 print_allocno_prefs (stderr
, a
);
1295 /* The function frees memory allocated for PREF. */
1297 finish_pref (ira_pref_t pref
)
1299 ira_prefs
[pref
->num
] = NULL
;
1300 pref_pool
.remove (pref
);
1303 /* Remove PREF from the list of allocno prefs and free memory for
1306 ira_remove_pref (ira_pref_t pref
)
1308 ira_pref_t cpref
, prev
;
1310 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
1311 fprintf (ira_dump_file
, " Removing pref%d:hr%d@%d\n",
1312 pref
->num
, pref
->hard_regno
, pref
->freq
);
1313 for (prev
= NULL
, cpref
= ALLOCNO_PREFS (pref
->allocno
);
1315 prev
= cpref
, cpref
= cpref
->next_pref
)
1318 ira_assert (cpref
!= NULL
);
1320 ALLOCNO_PREFS (pref
->allocno
) = pref
->next_pref
;
1322 prev
->next_pref
= pref
->next_pref
;
1326 /* Remove all prefs of allocno A. */
1328 ira_remove_allocno_prefs (ira_allocno_t a
)
1330 ira_pref_t pref
, next_pref
;
1332 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= next_pref
)
1334 next_pref
= pref
->next_pref
;
1337 ALLOCNO_PREFS (a
) = NULL
;
1340 /* Free memory allocated for all prefs. */
1345 ira_pref_iterator pi
;
1347 FOR_EACH_PREF (pref
, pi
)
1349 pref_vec
.release ();
1350 pref_pool
.release ();
1355 /* Pools for copies. */
1356 static object_allocator
<ira_allocno_copy
> copy_pool ("copies");
1358 /* Vec containing references to all created copies. It is a
1359 container of array ira_copies. */
1360 static vec
<ira_copy_t
> copy_vec
;
1362 /* The function initializes data concerning allocno copies. */
1364 initiate_copies (void)
1366 copy_vec
.create (get_max_uid ());
1371 /* Return copy connecting A1 and A2 and originated from INSN of
1372 LOOP_TREE_NODE if any. */
1374 find_allocno_copy (ira_allocno_t a1
, ira_allocno_t a2
, rtx_insn
*insn
,
1375 ira_loop_tree_node_t loop_tree_node
)
1377 ira_copy_t cp
, next_cp
;
1378 ira_allocno_t another_a
;
1380 for (cp
= ALLOCNO_COPIES (a1
); cp
!= NULL
; cp
= next_cp
)
1382 if (cp
->first
== a1
)
1384 next_cp
= cp
->next_first_allocno_copy
;
1385 another_a
= cp
->second
;
1387 else if (cp
->second
== a1
)
1389 next_cp
= cp
->next_second_allocno_copy
;
1390 another_a
= cp
->first
;
1394 if (another_a
== a2
&& cp
->insn
== insn
1395 && cp
->loop_tree_node
== loop_tree_node
)
1401 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1402 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1404 ira_create_copy (ira_allocno_t first
, ira_allocno_t second
, int freq
,
1405 bool constraint_p
, rtx_insn
*insn
,
1406 ira_loop_tree_node_t loop_tree_node
)
1410 cp
= copy_pool
.allocate ();
1411 cp
->num
= ira_copies_num
;
1413 cp
->second
= second
;
1415 cp
->constraint_p
= constraint_p
;
1417 cp
->loop_tree_node
= loop_tree_node
;
1418 copy_vec
.safe_push (cp
);
1419 ira_copies
= copy_vec
.address ();
1420 ira_copies_num
= copy_vec
.length ();
1424 /* Attach a copy CP to allocnos involved into the copy. */
1426 add_allocno_copy_to_list (ira_copy_t cp
)
1428 ira_allocno_t first
= cp
->first
, second
= cp
->second
;
1430 cp
->prev_first_allocno_copy
= NULL
;
1431 cp
->prev_second_allocno_copy
= NULL
;
1432 cp
->next_first_allocno_copy
= ALLOCNO_COPIES (first
);
1433 if (cp
->next_first_allocno_copy
!= NULL
)
1435 if (cp
->next_first_allocno_copy
->first
== first
)
1436 cp
->next_first_allocno_copy
->prev_first_allocno_copy
= cp
;
1438 cp
->next_first_allocno_copy
->prev_second_allocno_copy
= cp
;
1440 cp
->next_second_allocno_copy
= ALLOCNO_COPIES (second
);
1441 if (cp
->next_second_allocno_copy
!= NULL
)
1443 if (cp
->next_second_allocno_copy
->second
== second
)
1444 cp
->next_second_allocno_copy
->prev_second_allocno_copy
= cp
;
1446 cp
->next_second_allocno_copy
->prev_first_allocno_copy
= cp
;
1448 ALLOCNO_COPIES (first
) = cp
;
1449 ALLOCNO_COPIES (second
) = cp
;
1452 /* Make a copy CP a canonical copy where number of the
1453 first allocno is less than the second one. */
1455 swap_allocno_copy_ends_if_necessary (ira_copy_t cp
)
1457 if (ALLOCNO_NUM (cp
->first
) <= ALLOCNO_NUM (cp
->second
))
1460 std::swap (cp
->first
, cp
->second
);
1461 std::swap (cp
->prev_first_allocno_copy
, cp
->prev_second_allocno_copy
);
1462 std::swap (cp
->next_first_allocno_copy
, cp
->next_second_allocno_copy
);
1465 /* Create (or update frequency if the copy already exists) and return
1466 the copy of allocnos FIRST and SECOND with frequency FREQ
1467 corresponding to move insn INSN (if any) and originated from
1470 ira_add_allocno_copy (ira_allocno_t first
, ira_allocno_t second
, int freq
,
1471 bool constraint_p
, rtx_insn
*insn
,
1472 ira_loop_tree_node_t loop_tree_node
)
1476 if ((cp
= find_allocno_copy (first
, second
, insn
, loop_tree_node
)) != NULL
)
1481 cp
= ira_create_copy (first
, second
, freq
, constraint_p
, insn
,
1483 ira_assert (first
!= NULL
&& second
!= NULL
);
1484 add_allocno_copy_to_list (cp
);
1485 swap_allocno_copy_ends_if_necessary (cp
);
1489 /* Print info about copy CP into file F. */
1491 print_copy (FILE *f
, ira_copy_t cp
)
1493 fprintf (f
, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp
->num
,
1494 ALLOCNO_NUM (cp
->first
), ALLOCNO_REGNO (cp
->first
),
1495 ALLOCNO_NUM (cp
->second
), ALLOCNO_REGNO (cp
->second
), cp
->freq
,
1497 ? "move" : cp
->constraint_p
? "constraint" : "shuffle");
1501 debug (ira_allocno_copy
&ref
)
1503 print_copy (stderr
, &ref
);
1507 debug (ira_allocno_copy
*ptr
)
1512 fprintf (stderr
, "<nil>\n");
1515 /* Print info about copy CP into stderr. */
1517 ira_debug_copy (ira_copy_t cp
)
1519 print_copy (stderr
, cp
);
1522 /* Print info about all copies into file F. */
1524 print_copies (FILE *f
)
1527 ira_copy_iterator ci
;
1529 FOR_EACH_COPY (cp
, ci
)
1533 /* Print info about all copies into stderr. */
1535 ira_debug_copies (void)
1537 print_copies (stderr
);
1540 /* Print info about copies involving allocno A into file F. */
1542 print_allocno_copies (FILE *f
, ira_allocno_t a
)
1544 ira_allocno_t another_a
;
1545 ira_copy_t cp
, next_cp
;
1547 fprintf (f
, " a%d(r%d):", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
1548 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
1552 next_cp
= cp
->next_first_allocno_copy
;
1553 another_a
= cp
->second
;
1555 else if (cp
->second
== a
)
1557 next_cp
= cp
->next_second_allocno_copy
;
1558 another_a
= cp
->first
;
1562 fprintf (f
, " cp%d:a%d(r%d)@%d", cp
->num
,
1563 ALLOCNO_NUM (another_a
), ALLOCNO_REGNO (another_a
), cp
->freq
);
1569 debug (ira_allocno
&ref
)
1571 print_allocno_copies (stderr
, &ref
);
1575 debug (ira_allocno
*ptr
)
1580 fprintf (stderr
, "<nil>\n");
1584 /* Print info about copies involving allocno A into stderr. */
1586 ira_debug_allocno_copies (ira_allocno_t a
)
1588 print_allocno_copies (stderr
, a
);
1591 /* The function frees memory allocated for copy CP. */
1593 finish_copy (ira_copy_t cp
)
1595 copy_pool
.remove (cp
);
1599 /* Free memory allocated for all copies. */
1601 finish_copies (void)
1604 ira_copy_iterator ci
;
1606 FOR_EACH_COPY (cp
, ci
)
1608 copy_vec
.release ();
1609 copy_pool
.release ();
1614 /* Pools for cost vectors. It is defined only for allocno classes. */
1615 static pool_allocator
*cost_vector_pool
[N_REG_CLASSES
];
1617 /* The function initiates work with hard register cost vectors. It
1618 creates allocation pool for each allocno class. */
1620 initiate_cost_vectors (void)
1623 enum reg_class aclass
;
1625 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
1627 aclass
= ira_allocno_classes
[i
];
1628 cost_vector_pool
[aclass
] = new pool_allocator
1629 ("cost vectors", sizeof (int) * (ira_class_hard_regs_num
[aclass
]));
1633 /* Allocate and return a cost vector VEC for ACLASS. */
1635 ira_allocate_cost_vector (reg_class_t aclass
)
1637 return (int*) cost_vector_pool
[(int) aclass
]->allocate ();
1640 /* Free a cost vector VEC for ACLASS. */
1642 ira_free_cost_vector (int *vec
, reg_class_t aclass
)
1644 ira_assert (vec
!= NULL
);
1645 cost_vector_pool
[(int) aclass
]->remove (vec
);
1648 /* Finish work with hard register cost vectors. Release allocation
1649 pool for each allocno class. */
1651 finish_cost_vectors (void)
1654 enum reg_class aclass
;
1656 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
1658 aclass
= ira_allocno_classes
[i
];
1659 delete cost_vector_pool
[aclass
];
1665 /* Compute a post-ordering of the reverse control flow of the loop body
1666 designated by the children nodes of LOOP_NODE, whose body nodes in
1667 pre-order are input as LOOP_PREORDER. Return a VEC with a post-order
1668 of the reverse loop body.
1670 For the post-order of the reverse CFG, we visit the basic blocks in
1671 LOOP_PREORDER array in the reverse order of where they appear.
1672 This is important: We do not just want to compute a post-order of
1673 the reverse CFG, we want to make a best-guess for a visiting order that
1674 minimizes the number of chain elements per allocno live range. If the
1675 blocks would be visited in a different order, we would still compute a
1676 correct post-ordering but it would be less likely that two nodes
1677 connected by an edge in the CFG are neighbors in the topsort. */
1679 static vec
<ira_loop_tree_node_t
>
1680 ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED
,
1681 const vec
<ira_loop_tree_node_t
> &loop_preorder
)
1683 vec
<ira_loop_tree_node_t
> topsort_nodes
= vNULL
;
1684 unsigned int n_loop_preorder
;
1686 n_loop_preorder
= loop_preorder
.length ();
1687 if (n_loop_preorder
!= 0)
1689 ira_loop_tree_node_t subloop_node
;
1691 auto_vec
<ira_loop_tree_node_t
> dfs_stack
;
1693 /* This is a bit of strange abuse of the BB_VISITED flag: We use
1694 the flag to mark blocks we still have to visit to add them to
1695 our post-order. Define an alias to avoid confusion. */
1696 #define BB_TO_VISIT BB_VISITED
1698 FOR_EACH_VEC_ELT (loop_preorder
, i
, subloop_node
)
1700 gcc_checking_assert (! (subloop_node
->bb
->flags
& BB_TO_VISIT
));
1701 subloop_node
->bb
->flags
|= BB_TO_VISIT
;
1704 topsort_nodes
.create (n_loop_preorder
);
1705 dfs_stack
.create (n_loop_preorder
);
1707 FOR_EACH_VEC_ELT_REVERSE (loop_preorder
, i
, subloop_node
)
1709 if (! (subloop_node
->bb
->flags
& BB_TO_VISIT
))
1712 subloop_node
->bb
->flags
&= ~BB_TO_VISIT
;
1713 dfs_stack
.quick_push (subloop_node
);
1714 while (! dfs_stack
.is_empty ())
1719 ira_loop_tree_node_t n
= dfs_stack
.last ();
1720 FOR_EACH_EDGE (e
, ei
, n
->bb
->preds
)
1722 ira_loop_tree_node_t pred_node
;
1723 basic_block pred_bb
= e
->src
;
1725 if (e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
1728 pred_node
= IRA_BB_NODE_BY_INDEX (pred_bb
->index
);
1730 && (pred_node
->bb
->flags
& BB_TO_VISIT
))
1732 pred_node
->bb
->flags
&= ~BB_TO_VISIT
;
1733 dfs_stack
.quick_push (pred_node
);
1736 if (n
== dfs_stack
.last ())
1739 topsort_nodes
.quick_push (n
);
1747 gcc_assert (topsort_nodes
.length () == n_loop_preorder
);
1748 return topsort_nodes
;
1751 /* The current loop tree node and its regno allocno map. */
1752 ira_loop_tree_node_t ira_curr_loop_tree_node
;
1753 ira_allocno_t
*ira_curr_regno_allocno_map
;
1755 /* This recursive function traverses loop tree with root LOOP_NODE
1756 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1757 correspondingly in preorder and postorder. The function sets up
1758 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1759 basic block nodes of LOOP_NODE is also processed (before its
1762 If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1763 the loop are passed in the *reverse* post-order of the *reverse*
1764 CFG. This is only used by ira_create_allocno_live_ranges, which
1765 wants to visit basic blocks in this order to minimize the number
1766 of elements per live range chain.
1767 Note that the loop tree nodes are still visited in the normal,
1768 forward post-order of the loop tree. */
1771 ira_traverse_loop_tree (bool bb_p
, ira_loop_tree_node_t loop_node
,
1772 void (*preorder_func
) (ira_loop_tree_node_t
),
1773 void (*postorder_func
) (ira_loop_tree_node_t
))
1775 ira_loop_tree_node_t subloop_node
;
1777 ira_assert (loop_node
->bb
== NULL
);
1778 ira_curr_loop_tree_node
= loop_node
;
1779 ira_curr_regno_allocno_map
= ira_curr_loop_tree_node
->regno_allocno_map
;
1781 if (preorder_func
!= NULL
)
1782 (*preorder_func
) (loop_node
);
1786 auto_vec
<ira_loop_tree_node_t
> loop_preorder
;
1789 /* Add all nodes to the set of nodes to visit. The IRA loop tree
1790 is set up such that nodes in the loop body appear in a pre-order
1791 of their place in the CFG. */
1792 for (subloop_node
= loop_node
->children
;
1793 subloop_node
!= NULL
;
1794 subloop_node
= subloop_node
->next
)
1795 if (subloop_node
->bb
!= NULL
)
1796 loop_preorder
.safe_push (subloop_node
);
1798 if (preorder_func
!= NULL
)
1799 FOR_EACH_VEC_ELT (loop_preorder
, i
, subloop_node
)
1800 (*preorder_func
) (subloop_node
);
1802 if (postorder_func
!= NULL
)
1804 vec
<ira_loop_tree_node_t
> loop_rev_postorder
=
1805 ira_loop_tree_body_rev_postorder (loop_node
, loop_preorder
);
1806 FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder
, i
, subloop_node
)
1807 (*postorder_func
) (subloop_node
);
1808 loop_rev_postorder
.release ();
1812 for (subloop_node
= loop_node
->subloops
;
1813 subloop_node
!= NULL
;
1814 subloop_node
= subloop_node
->subloop_next
)
1816 ira_assert (subloop_node
->bb
== NULL
);
1817 ira_traverse_loop_tree (bb_p
, subloop_node
,
1818 preorder_func
, postorder_func
);
1821 ira_curr_loop_tree_node
= loop_node
;
1822 ira_curr_regno_allocno_map
= ira_curr_loop_tree_node
->regno_allocno_map
;
1824 if (postorder_func
!= NULL
)
1825 (*postorder_func
) (loop_node
);
1830 /* The basic block currently being processed. */
1831 static basic_block curr_bb
;
1833 /* This recursive function creates allocnos corresponding to
1834 pseudo-registers containing in X. True OUTPUT_P means that X is
1835 an lvalue. OUTER corresponds to the parent expression of X. */
1837 create_insn_allocnos (rtx x
, rtx outer
, bool output_p
)
1841 enum rtx_code code
= GET_CODE (x
);
1847 if ((regno
= REGNO (x
)) >= FIRST_PSEUDO_REGISTER
)
1851 if ((a
= ira_curr_regno_allocno_map
[regno
]) == NULL
)
1853 a
= ira_create_allocno (regno
, false, ira_curr_loop_tree_node
);
1854 if (outer
!= NULL
&& GET_CODE (outer
) == SUBREG
)
1856 machine_mode wmode
= GET_MODE (outer
);
1857 if (partial_subreg_p (ALLOCNO_WMODE (a
), wmode
))
1858 ALLOCNO_WMODE (a
) = wmode
;
1862 ALLOCNO_NREFS (a
)++;
1863 ALLOCNO_FREQ (a
) += REG_FREQ_FROM_BB (curr_bb
);
1865 bitmap_set_bit (ira_curr_loop_tree_node
->modified_regnos
, regno
);
1869 else if (code
== SET
)
1871 create_insn_allocnos (SET_DEST (x
), NULL
, true);
1872 create_insn_allocnos (SET_SRC (x
), NULL
, false);
1875 else if (code
== CLOBBER
)
1877 create_insn_allocnos (XEXP (x
, 0), NULL
, true);
1880 else if (code
== MEM
)
1882 create_insn_allocnos (XEXP (x
, 0), NULL
, false);
1885 else if (code
== PRE_DEC
|| code
== POST_DEC
|| code
== PRE_INC
||
1886 code
== POST_INC
|| code
== POST_MODIFY
|| code
== PRE_MODIFY
)
1888 create_insn_allocnos (XEXP (x
, 0), NULL
, true);
1889 create_insn_allocnos (XEXP (x
, 0), NULL
, false);
1893 fmt
= GET_RTX_FORMAT (code
);
1894 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1897 create_insn_allocnos (XEXP (x
, i
), x
, output_p
);
1898 else if (fmt
[i
] == 'E')
1899 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1900 create_insn_allocnos (XVECEXP (x
, i
, j
), x
, output_p
);
1904 /* Create allocnos corresponding to pseudo-registers living in the
1905 basic block represented by the corresponding loop tree node
1908 create_bb_allocnos (ira_loop_tree_node_t bb_node
)
1915 curr_bb
= bb
= bb_node
->bb
;
1916 ira_assert (bb
!= NULL
);
1917 FOR_BB_INSNS_REVERSE (bb
, insn
)
1918 if (NONDEBUG_INSN_P (insn
))
1919 create_insn_allocnos (PATTERN (insn
), NULL
, false);
1920 /* It might be a allocno living through from one subloop to
1922 EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb
), FIRST_PSEUDO_REGISTER
, i
, bi
)
1923 if (ira_curr_regno_allocno_map
[i
] == NULL
)
1924 ira_create_allocno (i
, false, ira_curr_loop_tree_node
);
1927 /* Create allocnos corresponding to pseudo-registers living on edge E
1928 (a loop entry or exit). Also mark the allocnos as living on the
1931 create_loop_allocnos (edge e
)
1934 bitmap live_in_regs
, border_allocnos
;
1936 ira_loop_tree_node_t parent
;
1938 live_in_regs
= df_get_live_in (e
->dest
);
1939 border_allocnos
= ira_curr_loop_tree_node
->border_allocnos
;
1940 EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e
->src
),
1941 FIRST_PSEUDO_REGISTER
, i
, bi
)
1942 if (bitmap_bit_p (live_in_regs
, i
))
1944 if (ira_curr_regno_allocno_map
[i
] == NULL
)
1946 /* The order of creations is important for right
1947 ira_regno_allocno_map. */
1948 if ((parent
= ira_curr_loop_tree_node
->parent
) != NULL
1949 && parent
->regno_allocno_map
[i
] == NULL
)
1950 ira_create_allocno (i
, false, parent
);
1951 ira_create_allocno (i
, false, ira_curr_loop_tree_node
);
1953 bitmap_set_bit (border_allocnos
,
1954 ALLOCNO_NUM (ira_curr_regno_allocno_map
[i
]));
1958 /* Create allocnos corresponding to pseudo-registers living in loop
1959 represented by the corresponding loop tree node LOOP_NODE. This
1960 function is called by ira_traverse_loop_tree. */
1962 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node
)
1964 if (loop_node
->bb
!= NULL
)
1965 create_bb_allocnos (loop_node
);
1966 else if (loop_node
!= ira_loop_tree_root
)
1972 ira_assert (current_loops
!= NULL
);
1973 FOR_EACH_EDGE (e
, ei
, loop_node
->loop
->header
->preds
)
1974 if (e
->src
!= loop_node
->loop
->latch
)
1975 create_loop_allocnos (e
);
1977 auto_vec
<edge
> edges
= get_loop_exit_edges (loop_node
->loop
);
1978 FOR_EACH_VEC_ELT (edges
, i
, e
)
1979 create_loop_allocnos (e
);
1983 /* Propagate information about allocnos modified inside the loop given
1984 by its LOOP_TREE_NODE to its parent. */
1986 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node
)
1988 if (loop_tree_node
== ira_loop_tree_root
)
1990 ira_assert (loop_tree_node
->bb
== NULL
);
1991 bitmap_ior_into (loop_tree_node
->parent
->modified_regnos
,
1992 loop_tree_node
->modified_regnos
);
1995 /* Propagate ALLOCNO_HARD_REG_COSTS from A to PARENT_A. Use SPILL_COST
1996 as the cost of spilling a register throughout A (which we have to do
1997 for PARENT_A allocations that conflict with A). */
1999 ira_propagate_hard_reg_costs (ira_allocno_t parent_a
, ira_allocno_t a
,
2002 HARD_REG_SET conflicts
= ira_total_conflict_hard_regs (a
);
2003 if (ira_caller_save_loop_spill_p (parent_a
, a
, spill_cost
))
2004 conflicts
|= ira_need_caller_save_regs (a
);
2005 conflicts
&= ~ira_total_conflict_hard_regs (parent_a
);
2007 auto costs
= ALLOCNO_HARD_REG_COSTS (a
);
2008 if (!hard_reg_set_empty_p (conflicts
))
2009 ALLOCNO_MIGHT_CONFLICT_WITH_PARENT_P (a
) = true;
2013 auto aclass
= ALLOCNO_CLASS (a
);
2014 ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (parent_a
),
2015 aclass
, ALLOCNO_CLASS_COST (parent_a
));
2016 auto parent_costs
= ALLOCNO_HARD_REG_COSTS (parent_a
);
2017 for (int i
= 0; i
< ira_class_hard_regs_num
[aclass
]; ++i
)
2018 if (TEST_HARD_REG_BIT (conflicts
, ira_class_hard_regs
[aclass
][i
]))
2019 parent_costs
[i
] += spill_cost
;
2021 /* The cost to A of allocating this register to PARENT_A can't
2022 be more than the cost of spilling the register throughout A. */
2023 parent_costs
[i
] += MIN (costs
[i
], spill_cost
);
2026 /* Propagate new info about allocno A (see comments about accumulated
2027 info in allocno definition) to the corresponding allocno on upper
2028 loop tree level. So allocnos on upper levels accumulate
2029 information about the corresponding allocnos in nested regions.
2030 The new info means allocno info finally calculated in this
2033 propagate_allocno_info (void)
2036 ira_allocno_t a
, parent_a
;
2037 ira_loop_tree_node_t parent
;
2038 enum reg_class aclass
;
2040 if (flag_ira_region
!= IRA_REGION_ALL
2041 && flag_ira_region
!= IRA_REGION_MIXED
)
2043 for (i
= max_reg_num () - 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
2044 for (a
= ira_regno_allocno_map
[i
];
2046 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
2047 if ((parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
) != NULL
2048 && (parent_a
= parent
->regno_allocno_map
[i
]) != NULL
2049 /* There are no caps yet at this point. So use
2050 border_allocnos to find allocnos for the propagation. */
2051 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a
)->border_allocnos
,
2054 /* Calculate the cost of storing to memory on entry to A's loop,
2055 referencing as memory within A's loop, and restoring from
2056 memory on exit from A's loop. */
2057 ira_loop_border_costs
border_costs (a
);
2058 int spill_cost
= INT_MAX
;
2059 if (ira_subloop_allocnos_can_differ_p (parent_a
))
2060 spill_cost
= (border_costs
.spill_inside_loop_cost ()
2061 + ALLOCNO_MEMORY_COST (a
));
2063 if (! ALLOCNO_BAD_SPILL_P (a
))
2064 ALLOCNO_BAD_SPILL_P (parent_a
) = false;
2065 ALLOCNO_NREFS (parent_a
) += ALLOCNO_NREFS (a
);
2066 ALLOCNO_FREQ (parent_a
) += ALLOCNO_FREQ (a
);
2068 /* If A's allocation can differ from PARENT_A's, we can if necessary
2069 spill PARENT_A on entry to A's loop and restore it afterwards.
2070 Doing that has cost SPILL_COST. */
2071 if (!ira_subloop_allocnos_can_differ_p (parent_a
))
2072 merge_hard_reg_conflicts (a
, parent_a
, true);
2074 if (!ira_caller_save_loop_spill_p (parent_a
, a
, spill_cost
))
2076 ALLOCNO_CALL_FREQ (parent_a
) += ALLOCNO_CALL_FREQ (a
);
2077 ALLOCNO_CALLS_CROSSED_NUM (parent_a
)
2078 += ALLOCNO_CALLS_CROSSED_NUM (a
);
2079 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a
)
2080 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
2081 ALLOCNO_CROSSED_CALLS_ABIS (parent_a
)
2082 |= ALLOCNO_CROSSED_CALLS_ABIS (a
);
2083 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a
)
2084 |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
);
2086 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a
)
2087 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
2088 aclass
= ALLOCNO_CLASS (a
);
2089 ira_assert (aclass
== ALLOCNO_CLASS (parent_a
));
2090 ira_propagate_hard_reg_costs (parent_a
, a
, spill_cost
);
2091 ira_allocate_and_accumulate_costs
2092 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a
),
2094 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
));
2095 /* The cost to A of allocating a register to PARENT_A can't be
2096 more than the cost of spilling the register throughout A. */
2097 ALLOCNO_CLASS_COST (parent_a
)
2098 += MIN (ALLOCNO_CLASS_COST (a
), spill_cost
);
2099 ALLOCNO_MEMORY_COST (parent_a
) += ALLOCNO_MEMORY_COST (a
);
2103 /* Create allocnos corresponding to pseudo-registers in the current
2104 function. Traverse the loop tree for this. */
2106 create_allocnos (void)
2108 /* We need to process BB first to correctly link allocnos by member
2109 next_regno_allocno. */
2110 ira_traverse_loop_tree (true, ira_loop_tree_root
,
2111 create_loop_tree_node_allocnos
, NULL
);
2113 ira_traverse_loop_tree (false, ira_loop_tree_root
, NULL
,
2114 propagate_modified_regnos
);
2119 /* The page contains function to remove some regions from a separate
2120 register allocation. We remove regions whose separate allocation
2121 will hardly improve the result. As a result we speed up regional
2122 register allocation. */
2124 /* The function changes the object in range list given by R to OBJ. */
2126 change_object_in_range_list (live_range_t r
, ira_object_t obj
)
2128 for (; r
!= NULL
; r
= r
->next
)
2132 /* Move all live ranges associated with allocno FROM to allocno TO. */
2134 move_allocno_live_ranges (ira_allocno_t from
, ira_allocno_t to
)
2137 int n
= ALLOCNO_NUM_OBJECTS (from
);
2139 gcc_assert (n
== ALLOCNO_NUM_OBJECTS (to
));
2141 for (i
= 0; i
< n
; i
++)
2143 ira_object_t from_obj
= ALLOCNO_OBJECT (from
, i
);
2144 ira_object_t to_obj
= ALLOCNO_OBJECT (to
, i
);
2145 live_range_t lr
= OBJECT_LIVE_RANGES (from_obj
);
2147 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
2149 fprintf (ira_dump_file
,
2150 " Moving ranges of a%dr%d to a%dr%d: ",
2151 ALLOCNO_NUM (from
), ALLOCNO_REGNO (from
),
2152 ALLOCNO_NUM (to
), ALLOCNO_REGNO (to
));
2153 ira_print_live_range_list (ira_dump_file
, lr
);
2155 change_object_in_range_list (lr
, to_obj
);
2156 OBJECT_LIVE_RANGES (to_obj
)
2157 = ira_merge_live_ranges (lr
, OBJECT_LIVE_RANGES (to_obj
));
2158 OBJECT_LIVE_RANGES (from_obj
) = NULL
;
2163 copy_allocno_live_ranges (ira_allocno_t from
, ira_allocno_t to
)
2166 int n
= ALLOCNO_NUM_OBJECTS (from
);
2168 gcc_assert (n
== ALLOCNO_NUM_OBJECTS (to
));
2170 for (i
= 0; i
< n
; i
++)
2172 ira_object_t from_obj
= ALLOCNO_OBJECT (from
, i
);
2173 ira_object_t to_obj
= ALLOCNO_OBJECT (to
, i
);
2174 live_range_t lr
= OBJECT_LIVE_RANGES (from_obj
);
2176 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
2178 fprintf (ira_dump_file
, " Copying ranges of a%dr%d to a%dr%d: ",
2179 ALLOCNO_NUM (from
), ALLOCNO_REGNO (from
),
2180 ALLOCNO_NUM (to
), ALLOCNO_REGNO (to
));
2181 ira_print_live_range_list (ira_dump_file
, lr
);
2183 lr
= ira_copy_live_range_list (lr
);
2184 change_object_in_range_list (lr
, to_obj
);
2185 OBJECT_LIVE_RANGES (to_obj
)
2186 = ira_merge_live_ranges (lr
, OBJECT_LIVE_RANGES (to_obj
));
2190 /* Return TRUE if NODE represents a loop with low register
2193 low_pressure_loop_node_p (ira_loop_tree_node_t node
)
2196 enum reg_class pclass
;
2198 if (node
->bb
!= NULL
)
2201 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
2203 pclass
= ira_pressure_classes
[i
];
2204 if (node
->reg_pressure
[pclass
] > ira_class_hard_regs_num
[pclass
]
2205 && ira_class_hard_regs_num
[pclass
] > 1)
2212 /* Return TRUE if LOOP has a complex enter or exit edge. We don't
2213 form a region from such loop if the target use stack register
2214 because reg-stack.cc cannot deal with such edges. */
2216 loop_with_complex_edge_p (class loop
*loop
)
2223 FOR_EACH_EDGE (e
, ei
, loop
->header
->preds
)
2224 if (e
->flags
& EDGE_EH
)
2226 auto_vec
<edge
> edges
= get_loop_exit_edges (loop
);
2228 FOR_EACH_VEC_ELT (edges
, i
, e
)
2229 if (e
->flags
& EDGE_COMPLEX
)
2238 /* Sort loops for marking them for removal. We put already marked
2239 loops first, then less frequent loops next, and then outer loops
2242 loop_compare_func (const void *v1p
, const void *v2p
)
2245 ira_loop_tree_node_t l1
= *(const ira_loop_tree_node_t
*) v1p
;
2246 ira_loop_tree_node_t l2
= *(const ira_loop_tree_node_t
*) v2p
;
2248 ira_assert (l1
->parent
!= NULL
&& l2
->parent
!= NULL
);
2249 if (l1
->to_remove_p
&& ! l2
->to_remove_p
)
2251 if (! l1
->to_remove_p
&& l2
->to_remove_p
)
2253 if ((diff
= l1
->loop
->header
->count
.to_frequency (cfun
)
2254 - l2
->loop
->header
->count
.to_frequency (cfun
)) != 0)
2256 if ((diff
= (int) loop_depth (l1
->loop
) - (int) loop_depth (l2
->loop
)) != 0)
2258 /* Make sorting stable. */
2259 return l1
->loop_num
- l2
->loop_num
;
2262 /* Mark loops which should be removed from regional allocation. We
2263 remove a loop with low register pressure inside another loop with
2264 register pressure. In this case a separate allocation of the loop
2265 hardly helps (for irregular register file architecture it could
2266 help by choosing a better hard register in the loop but we prefer
2267 faster allocation even in this case). We also remove cheap loops
2268 if there are more than param_ira_max_loops_num of them. Loop with EH
2269 exit or enter edges are removed too because the allocation might
2270 require put pseudo moves on the EH edges (we could still do this
2271 for pseudos with caller saved hard registers in some cases but it
2272 is impossible to say here or during top-down allocation pass what
2273 hard register the pseudos get finally). */
2275 mark_loops_for_removal (void)
2278 ira_loop_tree_node_t
*sorted_loops
;
2281 ira_assert (current_loops
!= NULL
);
2283 = (ira_loop_tree_node_t
*) ira_allocate (sizeof (ira_loop_tree_node_t
)
2284 * number_of_loops (cfun
));
2285 for (n
= i
= 0; vec_safe_iterate (get_loops (cfun
), i
, &loop
); i
++)
2286 if (ira_loop_nodes
[i
].regno_allocno_map
!= NULL
)
2288 if (ira_loop_nodes
[i
].parent
== NULL
)
2290 /* Don't remove the root. */
2291 ira_loop_nodes
[i
].to_remove_p
= false;
2294 sorted_loops
[n
++] = &ira_loop_nodes
[i
];
2295 ira_loop_nodes
[i
].to_remove_p
2296 = ((low_pressure_loop_node_p (ira_loop_nodes
[i
].parent
)
2297 && low_pressure_loop_node_p (&ira_loop_nodes
[i
]))
2299 || loop_with_complex_edge_p (ira_loop_nodes
[i
].loop
)
2303 qsort (sorted_loops
, n
, sizeof (ira_loop_tree_node_t
), loop_compare_func
);
2304 for (i
= 0; i
< n
- param_ira_max_loops_num
; i
++)
2306 sorted_loops
[i
]->to_remove_p
= true;
2307 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2310 " Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2311 sorted_loops
[i
]->loop_num
, sorted_loops
[i
]->loop
->header
->index
,
2312 sorted_loops
[i
]->loop
->header
->count
.to_frequency (cfun
),
2313 loop_depth (sorted_loops
[i
]->loop
),
2314 low_pressure_loop_node_p (sorted_loops
[i
]->parent
)
2315 && low_pressure_loop_node_p (sorted_loops
[i
])
2316 ? "low pressure" : "cheap loop");
2318 ira_free (sorted_loops
);
2321 /* Mark all loops but root for removing. */
2323 mark_all_loops_for_removal (void)
2328 ira_assert (current_loops
!= NULL
);
2329 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), i
, loop
)
2330 if (ira_loop_nodes
[i
].regno_allocno_map
!= NULL
)
2332 if (ira_loop_nodes
[i
].parent
== NULL
)
2334 /* Don't remove the root. */
2335 ira_loop_nodes
[i
].to_remove_p
= false;
2338 ira_loop_nodes
[i
].to_remove_p
= true;
2339 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2342 " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2343 ira_loop_nodes
[i
].loop_num
,
2344 ira_loop_nodes
[i
].loop
->header
->index
,
2345 ira_loop_nodes
[i
].loop
->header
->count
.to_frequency (cfun
),
2346 loop_depth (ira_loop_nodes
[i
].loop
));
2350 /* Definition of vector of loop tree nodes. */
2352 /* Vec containing references to all removed loop tree nodes. */
2353 static vec
<ira_loop_tree_node_t
> removed_loop_vec
;
2355 /* Vec containing references to all children of loop tree nodes. */
/* NOTE(review): both vecs are created/released by remove_unnecessary_regions
   and used as explicit work stacks by
   remove_uneccesary_loop_nodes_from_loop_tree.  */
2356 static vec
<ira_loop_tree_node_t
> children_vec
;
2358 /* Remove subregions of NODE if their separate allocation will not
2359 improve the result. */
2361 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node
)
2365 ira_loop_tree_node_t subnode
;
2367 remove_p
= node
->to_remove_p
;
2369 children_vec
.safe_push (node
);
2370 start
= children_vec
.length ();
2371 for (subnode
= node
->children
; subnode
!= NULL
; subnode
= subnode
->next
)
2372 if (subnode
->bb
== NULL
)
2373 remove_uneccesary_loop_nodes_from_loop_tree (subnode
);
2375 children_vec
.safe_push (subnode
);
2376 node
->children
= node
->subloops
= NULL
;
2379 removed_loop_vec
.safe_push (node
);
2382 while (children_vec
.length () > start
)
2384 subnode
= children_vec
.pop ();
2385 subnode
->parent
= node
;
2386 subnode
->next
= node
->children
;
2387 node
->children
= subnode
;
2388 if (subnode
->bb
== NULL
)
2390 subnode
->subloop_next
= node
->subloops
;
2391 node
->subloops
= subnode
;
2396 /* Return TRUE if NODE is inside PARENT. */
2398 loop_is_inside_p (ira_loop_tree_node_t node
, ira_loop_tree_node_t parent
)
2400 for (node
= node
->parent
; node
!= NULL
; node
= node
->parent
)
2406 /* Sort allocnos according to their order in regno allocno list. */
2408 regno_allocno_order_compare_func (const void *v1p
, const void *v2p
)
2410 ira_allocno_t a1
= *(const ira_allocno_t
*) v1p
;
2411 ira_allocno_t a2
= *(const ira_allocno_t
*) v2p
;
2412 ira_loop_tree_node_t n1
= ALLOCNO_LOOP_TREE_NODE (a1
);
2413 ira_loop_tree_node_t n2
= ALLOCNO_LOOP_TREE_NODE (a2
);
2415 if (loop_is_inside_p (n1
, n2
))
2417 else if (loop_is_inside_p (n2
, n1
))
2419 /* If allocnos are equally good, sort by allocno numbers, so that
2420 the results of qsort leave nothing to chance. We put allocnos
2421 with higher number first in the list because it is the original
2422 order for allocnos from loops on the same levels. */
2423 return ALLOCNO_NUM (a2
) - ALLOCNO_NUM (a1
);
2426 /* This array is used to sort allocnos to restore allocno order in
2427 the regno allocno list. */
/* NOTE(review): lazily allocated/freed by remove_unnecessary_allocnos,
   filled by ira_rebuild_regno_allocno_list.  */
2428 static ira_allocno_t
*regno_allocnos
;
2430 /* Restore allocno order for REGNO in the regno allocno list. */
2432 ira_rebuild_regno_allocno_list (int regno
)
2437 for (n
= 0, a
= ira_regno_allocno_map
[regno
];
2439 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
2440 regno_allocnos
[n
++] = a
;
2442 qsort (regno_allocnos
, n
, sizeof (ira_allocno_t
),
2443 regno_allocno_order_compare_func
);
2444 for (i
= 1; i
< n
; i
++)
2445 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos
[i
- 1]) = regno_allocnos
[i
];
2446 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos
[n
- 1]) = NULL
;
2447 ira_regno_allocno_map
[regno
] = regno_allocnos
[0];
2448 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2449 fprintf (ira_dump_file
, " Rebuilding regno allocno list for %d\n", regno
);
2452 /* Propagate info from allocno FROM_A to allocno A. */
2454 propagate_some_info_from_allocno (ira_allocno_t a
, ira_allocno_t from_a
)
2456 enum reg_class aclass
;
2458 merge_hard_reg_conflicts (from_a
, a
, false);
2459 ALLOCNO_NREFS (a
) += ALLOCNO_NREFS (from_a
);
2460 ALLOCNO_FREQ (a
) += ALLOCNO_FREQ (from_a
);
2461 ALLOCNO_CALL_FREQ (a
) += ALLOCNO_CALL_FREQ (from_a
);
2462 ALLOCNO_CALLS_CROSSED_NUM (a
) += ALLOCNO_CALLS_CROSSED_NUM (from_a
);
2463 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
)
2464 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a
);
2465 ALLOCNO_CROSSED_CALLS_ABIS (a
) |= ALLOCNO_CROSSED_CALLS_ABIS (from_a
);
2466 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
)
2467 |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a
);
2469 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
)
2470 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a
);
2471 if (! ALLOCNO_BAD_SPILL_P (from_a
))
2472 ALLOCNO_BAD_SPILL_P (a
) = false;
2473 aclass
= ALLOCNO_CLASS (from_a
);
2474 ira_assert (aclass
== ALLOCNO_CLASS (a
));
2475 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a
), aclass
,
2476 ALLOCNO_HARD_REG_COSTS (from_a
));
2477 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a
),
2479 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a
));
2480 ALLOCNO_CLASS_COST (a
) += ALLOCNO_CLASS_COST (from_a
);
2481 ALLOCNO_MEMORY_COST (a
) += ALLOCNO_MEMORY_COST (from_a
);
2484 /* Remove allocnos from loops removed from the allocation
2487 remove_unnecessary_allocnos (void)
2490 bool merged_p
, rebuild_p
;
2491 ira_allocno_t a
, prev_a
, next_a
, parent_a
;
2492 ira_loop_tree_node_t a_node
, parent
;
2495 regno_allocnos
= NULL
;
2496 for (regno
= max_reg_num () - 1; regno
>= FIRST_PSEUDO_REGISTER
; regno
--)
2499 for (prev_a
= NULL
, a
= ira_regno_allocno_map
[regno
];
2503 next_a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
);
2504 a_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2505 if (! a_node
->to_remove_p
)
2509 for (parent
= a_node
->parent
;
2510 (parent_a
= parent
->regno_allocno_map
[regno
]) == NULL
2511 && parent
->to_remove_p
;
2512 parent
= parent
->parent
)
2514 if (parent_a
== NULL
)
2516 /* There are no allocnos with the same regno in
2517 upper region -- just move the allocno to the
2520 ALLOCNO_LOOP_TREE_NODE (a
) = parent
;
2521 parent
->regno_allocno_map
[regno
] = a
;
2522 bitmap_set_bit (parent
->all_allocnos
, ALLOCNO_NUM (a
));
2527 /* Remove the allocno and update info of allocno in
2528 the upper region. */
2530 ira_regno_allocno_map
[regno
] = next_a
;
2532 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a
) = next_a
;
2533 move_allocno_live_ranges (a
, parent_a
);
2535 propagate_some_info_from_allocno (parent_a
, a
);
2536 /* Remove it from the corresponding regno allocno
2537 map to avoid info propagation of subsequent
2538 allocno into this already removed allocno. */
2539 a_node
->regno_allocno_map
[regno
] = NULL
;
2540 ira_remove_allocno_prefs (a
);
2546 /* We need to restore the order in regno allocno list. */
2548 if (regno_allocnos
== NULL
)
2550 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
2551 * ira_allocnos_num
);
2552 ira_rebuild_regno_allocno_list (regno
);
2556 ira_rebuild_start_finish_chains ();
2557 if (regno_allocnos
!= NULL
)
2558 ira_free (regno_allocnos
);
2561 /* Remove allocnos from all loops but the root. */
2563 remove_low_level_allocnos (void)
2566 bool merged_p
, propagate_p
;
2567 ira_allocno_t a
, top_a
;
2568 ira_loop_tree_node_t a_node
, parent
;
2569 ira_allocno_iterator ai
;
2572 FOR_EACH_ALLOCNO (a
, ai
)
2574 a_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2575 if (a_node
== ira_loop_tree_root
|| ALLOCNO_CAP_MEMBER (a
) != NULL
)
2577 regno
= ALLOCNO_REGNO (a
);
2578 if ((top_a
= ira_loop_tree_root
->regno_allocno_map
[regno
]) == NULL
)
2580 ALLOCNO_LOOP_TREE_NODE (a
) = ira_loop_tree_root
;
2581 ira_loop_tree_root
->regno_allocno_map
[regno
] = a
;
2584 propagate_p
= a_node
->parent
->regno_allocno_map
[regno
] == NULL
;
2585 /* Remove the allocno and update info of allocno in the upper
2587 move_allocno_live_ranges (a
, top_a
);
2590 propagate_some_info_from_allocno (top_a
, a
);
2592 FOR_EACH_ALLOCNO (a
, ai
)
2594 a_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2595 if (a_node
== ira_loop_tree_root
)
2597 parent
= a_node
->parent
;
2598 regno
= ALLOCNO_REGNO (a
);
2599 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
2600 ira_assert (ALLOCNO_CAP (a
) != NULL
);
2601 else if (ALLOCNO_CAP (a
) == NULL
)
2602 ira_assert (parent
->regno_allocno_map
[regno
] != NULL
);
2604 FOR_EACH_ALLOCNO (a
, ai
)
2606 regno
= ALLOCNO_REGNO (a
);
2607 if (ira_loop_tree_root
->regno_allocno_map
[regno
] == a
)
2610 ira_allocno_object_iterator oi
;
2612 ira_regno_allocno_map
[regno
] = a
;
2613 ALLOCNO_NEXT_REGNO_ALLOCNO (a
) = NULL
;
2614 ALLOCNO_CAP_MEMBER (a
) = NULL
;
2615 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
2616 OBJECT_CONFLICT_HARD_REGS (obj
)
2617 = OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
);
2619 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a
))
2620 ALLOCNO_NO_STACK_REG_P (a
) = true;
2625 ira_remove_allocno_prefs (a
);
2630 ira_rebuild_start_finish_chains ();
2633 /* Remove loops from consideration. We remove all loops except for
2634 root if ALL_P or loops for which a separate allocation will not
2635 improve the result. We have to do this after allocno creation and
2636 their costs and allocno class evaluation because only after that
2637 the register pressure can be known and is calculated. */
2639 remove_unnecessary_regions (bool all_p
)
2641 if (current_loops
== NULL
)
2644 mark_all_loops_for_removal ();
2646 mark_loops_for_removal ();
2647 children_vec
.create (last_basic_block_for_fn (cfun
)
2648 + number_of_loops (cfun
));
2649 removed_loop_vec
.create (last_basic_block_for_fn (cfun
)
2650 + number_of_loops (cfun
));
2651 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root
);
2652 children_vec
.release ();
2654 remove_low_level_allocnos ();
2656 remove_unnecessary_allocnos ();
2657 while (removed_loop_vec
.length () > 0)
2658 finish_loop_tree_node (removed_loop_vec
.pop ());
2659 removed_loop_vec
.release ();
2664 /* At this point true value of allocno attribute bad_spill_p means
2665 that there is an insn where allocno occurs and where the allocno
2666 cannot be used as memory. The function updates the attribute, now
2667 it can be true only for allocnos which cannot be used as memory in
2668 an insn and in whose live ranges there is other allocno deaths.
2669 Spilling allocnos with true value will not improve the code because
2670 it will not make other allocnos colorable and additional reloads
2671 for the corresponding pseudo will be generated in reload pass for
2672 each insn it occurs.
2674 This is a trick mentioned in one classic article of Chaitin etc
2675 which is frequently omitted in other implementations of RA based on
2678 update_bad_spill_attribute (void)
2682 ira_allocno_iterator ai
;
2683 ira_allocno_object_iterator aoi
;
2686 enum reg_class aclass
;
2687 bitmap_head dead_points
[N_REG_CLASSES
];
2689 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
2691 aclass
= ira_allocno_classes
[i
];
2692 bitmap_initialize (&dead_points
[aclass
], ®_obstack
);
2694 FOR_EACH_ALLOCNO (a
, ai
)
2696 aclass
= ALLOCNO_CLASS (a
);
2697 if (aclass
== NO_REGS
)
2699 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, aoi
)
2700 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
2701 bitmap_set_bit (&dead_points
[aclass
], r
->finish
);
2703 FOR_EACH_ALLOCNO (a
, ai
)
2705 aclass
= ALLOCNO_CLASS (a
);
2706 if (aclass
== NO_REGS
)
2708 if (! ALLOCNO_BAD_SPILL_P (a
))
2710 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, aoi
)
2712 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
2714 for (i
= r
->start
+ 1; i
< r
->finish
; i
++)
2715 if (bitmap_bit_p (&dead_points
[aclass
], i
))
2722 ALLOCNO_BAD_SPILL_P (a
) = false;
2727 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
2729 aclass
= ira_allocno_classes
[i
];
2730 bitmap_clear (&dead_points
[aclass
]);
2736 /* Set up minimal and maximal live range points for allocnos. */
2738 setup_min_max_allocno_live_range_point (void)
2741 ira_allocno_t a
, parent_a
, cap
;
2742 ira_allocno_iterator ai
;
2743 #ifdef ENABLE_IRA_CHECKING
2744 ira_object_iterator oi
;
2748 ira_loop_tree_node_t parent
;
2750 FOR_EACH_ALLOCNO (a
, ai
)
2752 int n
= ALLOCNO_NUM_OBJECTS (a
);
2754 for (i
= 0; i
< n
; i
++)
2756 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
2757 r
= OBJECT_LIVE_RANGES (obj
);
2760 OBJECT_MAX (obj
) = r
->finish
;
2761 for (; r
->next
!= NULL
; r
= r
->next
)
2763 OBJECT_MIN (obj
) = r
->start
;
2766 for (i
= max_reg_num () - 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
2767 for (a
= ira_regno_allocno_map
[i
];
2769 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
2772 int n
= ALLOCNO_NUM_OBJECTS (a
);
2774 for (j
= 0; j
< n
; j
++)
2776 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
2777 ira_object_t parent_obj
;
2779 if (OBJECT_MAX (obj
) < 0)
2781 /* The object is not used and hence does not live. */
2782 ira_assert (OBJECT_LIVE_RANGES (obj
) == NULL
);
2783 OBJECT_MAX (obj
) = 0;
2784 OBJECT_MIN (obj
) = 1;
2787 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
2788 /* Accumulation of range info. */
2789 if (ALLOCNO_CAP (a
) != NULL
)
2791 for (cap
= ALLOCNO_CAP (a
); cap
!= NULL
; cap
= ALLOCNO_CAP (cap
))
2793 ira_object_t cap_obj
= ALLOCNO_OBJECT (cap
, j
);
2794 if (OBJECT_MAX (cap_obj
) < OBJECT_MAX (obj
))
2795 OBJECT_MAX (cap_obj
) = OBJECT_MAX (obj
);
2796 if (OBJECT_MIN (cap_obj
) > OBJECT_MIN (obj
))
2797 OBJECT_MIN (cap_obj
) = OBJECT_MIN (obj
);
2801 if ((parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
) == NULL
)
2803 parent_a
= parent
->regno_allocno_map
[i
];
2804 parent_obj
= ALLOCNO_OBJECT (parent_a
, j
);
2805 if (OBJECT_MAX (parent_obj
) < OBJECT_MAX (obj
))
2806 OBJECT_MAX (parent_obj
) = OBJECT_MAX (obj
);
2807 if (OBJECT_MIN (parent_obj
) > OBJECT_MIN (obj
))
2808 OBJECT_MIN (parent_obj
) = OBJECT_MIN (obj
);
2811 #ifdef ENABLE_IRA_CHECKING
2812 FOR_EACH_OBJECT (obj
, oi
)
2814 if ((OBJECT_MIN (obj
) >= 0 && OBJECT_MIN (obj
) <= ira_max_point
)
2815 && (OBJECT_MAX (obj
) >= 0 && OBJECT_MAX (obj
) <= ira_max_point
))
2822 /* Sort allocnos according to their live ranges. Allocnos with
2823 smaller allocno class are put first unless we use priority
2824 coloring. Allocnos with the same class are ordered according
2825 their start (min). Allocnos with the same start are ordered
2826 according their finish (max). */
2828 object_range_compare_func (const void *v1p
, const void *v2p
)
2831 ira_object_t obj1
= *(const ira_object_t
*) v1p
;
2832 ira_object_t obj2
= *(const ira_object_t
*) v2p
;
2833 ira_allocno_t a1
= OBJECT_ALLOCNO (obj1
);
2834 ira_allocno_t a2
= OBJECT_ALLOCNO (obj2
);
2836 if ((diff
= OBJECT_MIN (obj1
) - OBJECT_MIN (obj2
)) != 0)
2838 if ((diff
= OBJECT_MAX (obj1
) - OBJECT_MAX (obj2
)) != 0)
2840 return ALLOCNO_NUM (a1
) - ALLOCNO_NUM (a2
);
2843 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2845 sort_conflict_id_map (void)
2849 ira_allocno_iterator ai
;
2852 FOR_EACH_ALLOCNO (a
, ai
)
2854 ira_allocno_object_iterator oi
;
2857 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
2858 ira_object_id_map
[num
++] = obj
;
2861 qsort (ira_object_id_map
, num
, sizeof (ira_object_t
),
2862 object_range_compare_func
);
2863 for (i
= 0; i
< num
; i
++)
2865 ira_object_t obj
= ira_object_id_map
[i
];
2867 gcc_assert (obj
!= NULL
);
2868 OBJECT_CONFLICT_ID (obj
) = i
;
2870 for (i
= num
; i
< ira_objects_num
; i
++)
2871 ira_object_id_map
[i
] = NULL
;
2874 /* Set up minimal and maximal conflict ids of allocnos with which
2875 given allocno can conflict. */
2877 setup_min_max_conflict_allocno_ids (void)
2880 int i
, j
, min
, max
, start
, finish
, first_not_finished
, filled_area_start
;
2881 int *live_range_min
, *last_lived
;
2882 int word0_min
, word0_max
;
2884 ira_allocno_iterator ai
;
2886 live_range_min
= (int *) ira_allocate (sizeof (int) * ira_objects_num
);
2888 first_not_finished
= -1;
2889 for (i
= 0; i
< ira_objects_num
; i
++)
2891 ira_object_t obj
= ira_object_id_map
[i
];
2896 a
= OBJECT_ALLOCNO (obj
);
2900 aclass
= ALLOCNO_CLASS (a
);
2902 first_not_finished
= i
;
2906 start
= OBJECT_MIN (obj
);
2907 /* If we skip an allocno, the allocno with smaller ids will
2908 be also skipped because of the secondary sorting the
2909 range finishes (see function
2910 object_range_compare_func). */
2911 while (first_not_finished
< i
2912 && start
> OBJECT_MAX (ira_object_id_map
2913 [first_not_finished
]))
2914 first_not_finished
++;
2915 min
= first_not_finished
;
2918 /* We could increase min further in this case but it is good
2921 live_range_min
[i
] = OBJECT_MIN (obj
);
2922 OBJECT_MIN (obj
) = min
;
2924 last_lived
= (int *) ira_allocate (sizeof (int) * ira_max_point
);
2926 filled_area_start
= -1;
2927 for (i
= ira_objects_num
- 1; i
>= 0; i
--)
2929 ira_object_t obj
= ira_object_id_map
[i
];
2934 a
= OBJECT_ALLOCNO (obj
);
2937 aclass
= ALLOCNO_CLASS (a
);
2938 for (j
= 0; j
< ira_max_point
; j
++)
2940 filled_area_start
= ira_max_point
;
2942 min
= live_range_min
[i
];
2943 finish
= OBJECT_MAX (obj
);
2944 max
= last_lived
[finish
];
2946 /* We could decrease max further in this case but it is good
2948 max
= OBJECT_CONFLICT_ID (obj
) - 1;
2949 OBJECT_MAX (obj
) = max
;
2950 /* In filling, we can go further A range finish to recognize
2951 intersection quickly because if the finish of subsequently
2952 processed allocno (it has smaller conflict id) range is
2953 further A range finish than they are definitely intersected
2954 (the reason for this is the allocnos with bigger conflict id
2955 have their range starts not smaller than allocnos with
2957 for (j
= min
; j
< filled_area_start
; j
++)
2959 filled_area_start
= min
;
2961 ira_free (last_lived
);
2962 ira_free (live_range_min
);
2964 /* For allocnos with more than one object, we may later record extra conflicts in
2965 subobject 0 that we cannot really know about here.
2966 For now, simply widen the min/max range of these subobjects. */
2968 word0_min
= INT_MAX
;
2969 word0_max
= INT_MIN
;
2971 FOR_EACH_ALLOCNO (a
, ai
)
2973 int n
= ALLOCNO_NUM_OBJECTS (a
);
2978 obj0
= ALLOCNO_OBJECT (a
, 0);
2979 if (OBJECT_CONFLICT_ID (obj0
) < word0_min
)
2980 word0_min
= OBJECT_CONFLICT_ID (obj0
);
2981 if (OBJECT_CONFLICT_ID (obj0
) > word0_max
)
2982 word0_max
= OBJECT_CONFLICT_ID (obj0
);
2984 FOR_EACH_ALLOCNO (a
, ai
)
2986 int n
= ALLOCNO_NUM_OBJECTS (a
);
2991 obj0
= ALLOCNO_OBJECT (a
, 0);
2992 if (OBJECT_MIN (obj0
) > word0_min
)
2993 OBJECT_MIN (obj0
) = word0_min
;
2994 if (OBJECT_MAX (obj0
) < word0_max
)
2995 OBJECT_MAX (obj0
) = word0_max
;
3005 ira_allocno_iterator ai
;
3006 ira_loop_tree_node_t loop_tree_node
;
3008 FOR_EACH_ALLOCNO (a
, ai
)
3010 if (ALLOCNO_LOOP_TREE_NODE (a
) == ira_loop_tree_root
)
3012 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
3013 create_cap_allocno (a
);
3014 else if (ALLOCNO_CAP (a
) == NULL
)
3016 loop_tree_node
= ALLOCNO_LOOP_TREE_NODE (a
);
3017 if (!bitmap_bit_p (loop_tree_node
->border_allocnos
, ALLOCNO_NUM (a
)))
3018 create_cap_allocno (a
);
3025 /* The page contains code transforming more one region internal
3026 representation (IR) to one region IR which is necessary for reload.
3027 This transformation is called IR flattening. We might just rebuild
3028 the IR for one region but we don't do it because it takes a lot of
3031 /* Map: regno -> allocnos which will finally represent the regno for
3032 IR with one region. */
/* NOTE(review): allocated/freed inside ira_flattening; also consulted by
   copy_info_to_removed_store_destinations while the map is alive.  */
3033 static ira_allocno_t
*regno_top_level_allocno_map
;
3035 /* Find the allocno that corresponds to A at a level one higher up in the
3036 loop tree. Returns NULL if A is a cap, or if it has no parent. */
3038 ira_parent_allocno (ira_allocno_t a
)
3040 ira_loop_tree_node_t parent
;
3042 if (ALLOCNO_CAP (a
) != NULL
)
3045 parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
;
3049 return parent
->regno_allocno_map
[ALLOCNO_REGNO (a
)];
3052 /* Find the allocno that corresponds to A at a level one higher up in the
3053 loop tree. If ALLOCNO_CAP is set for A, return that. */
3055 ira_parent_or_cap_allocno (ira_allocno_t a
)
3057 if (ALLOCNO_CAP (a
) != NULL
)
3058 return ALLOCNO_CAP (a
);
3060 return ira_parent_allocno (a
);
3063 /* Process all allocnos originated from pseudo REGNO and copy live
3064 ranges, hard reg conflicts, and allocno stack reg attributes from
3065 low level allocnos to final allocnos which are destinations of
3066 removed stores at a loop exit. Return true if we copied live
3069 copy_info_to_removed_store_destinations (int regno
)
3072 ira_allocno_t parent_a
= NULL
;
3073 ira_loop_tree_node_t parent
;
3077 for (a
= ira_regno_allocno_map
[regno
];
3079 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
3081 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))])
3082 /* This allocno will be removed. */
3085 /* Caps will be removed. */
3086 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
3087 for (parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
;
3089 parent
= parent
->parent
)
3090 if ((parent_a
= parent
->regno_allocno_map
[regno
]) == NULL
3092 == regno_top_level_allocno_map
[REGNO
3093 (allocno_emit_reg (parent_a
))]
3094 && ALLOCNO_EMIT_DATA (parent_a
)->mem_optimized_dest_p
))
3096 if (parent
== NULL
|| parent_a
== NULL
)
3099 copy_allocno_live_ranges (a
, parent_a
);
3100 merge_hard_reg_conflicts (a
, parent_a
, true);
3102 ALLOCNO_CALL_FREQ (parent_a
) += ALLOCNO_CALL_FREQ (a
);
3103 ALLOCNO_CALLS_CROSSED_NUM (parent_a
)
3104 += ALLOCNO_CALLS_CROSSED_NUM (a
);
3105 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a
)
3106 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
3107 ALLOCNO_CROSSED_CALLS_ABIS (parent_a
)
3108 |= ALLOCNO_CROSSED_CALLS_ABIS (a
);
3109 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a
)
3110 |= ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
);
3111 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a
)
3112 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
3118 /* Flatten the IR. In other words, this function transforms IR as if
3119 it were built with one region (without loops). We could make it
3120 much simpler by rebuilding IR with one region, but unfortunately it
3121 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
3122 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
3123 IRA_MAX_POINT before emitting insns on the loop borders. */
3125 ira_flattening (int max_regno_before_emit
, int ira_max_point_before_emit
)
3130 bool new_pseudos_p
, merged_p
, mem_dest_p
;
3132 enum reg_class aclass
;
3133 ira_allocno_t a
, parent_a
, first
, second
, node_first
, node_second
;
3135 ira_loop_tree_node_t node
;
3137 ira_allocno_iterator ai
;
3138 ira_copy_iterator ci
;
3140 regno_top_level_allocno_map
3141 = (ira_allocno_t
*) ira_allocate (max_reg_num ()
3142 * sizeof (ira_allocno_t
));
3143 memset (regno_top_level_allocno_map
, 0,
3144 max_reg_num () * sizeof (ira_allocno_t
));
3145 new_pseudos_p
= merged_p
= false;
3146 FOR_EACH_ALLOCNO (a
, ai
)
3148 ira_allocno_object_iterator oi
;
3151 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
3152 /* Caps are not in the regno allocno maps and they are never
3153 will be transformed into allocnos existing after IR
3156 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
3157 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
)
3158 = OBJECT_CONFLICT_HARD_REGS (obj
);
3160 ALLOCNO_TOTAL_NO_STACK_REG_P (a
) = ALLOCNO_NO_STACK_REG_P (a
);
3163 /* Fix final allocno attributes. */
3164 for (i
= max_regno_before_emit
- 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
3167 for (a
= ira_regno_allocno_map
[i
];
3169 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
3171 ira_emit_data_t parent_data
, data
= ALLOCNO_EMIT_DATA (a
);
3173 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
3174 if (data
->somewhere_renamed_p
)
3175 new_pseudos_p
= true;
3176 parent_a
= ira_parent_allocno (a
);
3177 if (parent_a
== NULL
)
3179 ALLOCNO_COPIES (a
) = NULL
;
3180 regno_top_level_allocno_map
[REGNO (data
->reg
)] = a
;
3183 ira_assert (ALLOCNO_CAP_MEMBER (parent_a
) == NULL
);
3185 if (data
->mem_optimized_dest
!= NULL
)
3187 parent_data
= ALLOCNO_EMIT_DATA (parent_a
);
3188 if (REGNO (data
->reg
) == REGNO (parent_data
->reg
))
3190 merge_hard_reg_conflicts (a
, parent_a
, true);
3191 move_allocno_live_ranges (a
, parent_a
);
3193 parent_data
->mem_optimized_dest_p
3194 = (parent_data
->mem_optimized_dest_p
3195 || data
->mem_optimized_dest_p
);
3198 new_pseudos_p
= true;
3201 ALLOCNO_NREFS (parent_a
) -= ALLOCNO_NREFS (a
);
3202 ALLOCNO_FREQ (parent_a
) -= ALLOCNO_FREQ (a
);
3203 ALLOCNO_CALL_FREQ (parent_a
) -= ALLOCNO_CALL_FREQ (a
);
3204 ALLOCNO_CALLS_CROSSED_NUM (parent_a
)
3205 -= ALLOCNO_CALLS_CROSSED_NUM (a
);
3206 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a
)
3207 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
3208 /* Assume that ALLOCNO_CROSSED_CALLS_ABIS and
3209 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS stay the same.
3210 We'd need to rebuild the IR to do better. */
3211 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a
)
3212 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
3213 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a
) >= 0
3214 && ALLOCNO_NREFS (parent_a
) >= 0
3215 && ALLOCNO_FREQ (parent_a
) >= 0);
3216 aclass
= ALLOCNO_CLASS (parent_a
);
3217 hard_regs_num
= ira_class_hard_regs_num
[aclass
];
3218 if (ALLOCNO_HARD_REG_COSTS (a
) != NULL
3219 && ALLOCNO_HARD_REG_COSTS (parent_a
) != NULL
)
3220 for (j
= 0; j
< hard_regs_num
; j
++)
3221 ALLOCNO_HARD_REG_COSTS (parent_a
)[j
]
3222 -= ALLOCNO_HARD_REG_COSTS (a
)[j
];
3223 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) != NULL
3224 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a
) != NULL
)
3225 for (j
= 0; j
< hard_regs_num
; j
++)
3226 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a
)[j
]
3227 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a
)[j
];
3228 ALLOCNO_CLASS_COST (parent_a
)
3229 -= ALLOCNO_CLASS_COST (a
);
3230 ALLOCNO_MEMORY_COST (parent_a
) -= ALLOCNO_MEMORY_COST (a
);
3231 parent_a
= ira_parent_allocno (parent_a
);
3232 if (parent_a
== NULL
)
3235 ALLOCNO_COPIES (a
) = NULL
;
3236 regno_top_level_allocno_map
[REGNO (data
->reg
)] = a
;
3238 if (mem_dest_p
&& copy_info_to_removed_store_destinations (i
))
3241 ira_assert (new_pseudos_p
|| ira_max_point_before_emit
== ira_max_point
);
3242 if (merged_p
|| ira_max_point_before_emit
!= ira_max_point
)
3243 ira_rebuild_start_finish_chains ();
3246 sparseset objects_live
;
3248 /* Rebuild conflicts. */
3249 FOR_EACH_ALLOCNO (a
, ai
)
3251 ira_allocno_object_iterator oi
;
3254 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))]
3255 || ALLOCNO_CAP_MEMBER (a
) != NULL
)
3257 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
3259 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
3260 ira_assert (r
->object
== obj
);
3261 clear_conflicts (obj
);
3264 objects_live
= sparseset_alloc (ira_objects_num
);
3265 for (i
= 0; i
< ira_max_point
; i
++)
3267 for (r
= ira_start_point_ranges
[i
]; r
!= NULL
; r
= r
->start_next
)
3269 ira_object_t obj
= r
->object
;
3271 a
= OBJECT_ALLOCNO (obj
);
3272 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))]
3273 || ALLOCNO_CAP_MEMBER (a
) != NULL
)
3276 aclass
= ALLOCNO_CLASS (a
);
3277 EXECUTE_IF_SET_IN_SPARSESET (objects_live
, n
)
3279 ira_object_t live_obj
= ira_object_id_map
[n
];
3280 ira_allocno_t live_a
= OBJECT_ALLOCNO (live_obj
);
3281 enum reg_class live_aclass
= ALLOCNO_CLASS (live_a
);
3283 if (ira_reg_classes_intersect_p
[aclass
][live_aclass
]
3284 /* Don't set up conflict for the allocno with itself. */
3286 ira_add_conflict (obj
, live_obj
);
3288 sparseset_set_bit (objects_live
, OBJECT_CONFLICT_ID (obj
));
3291 for (r
= ira_finish_point_ranges
[i
]; r
!= NULL
; r
= r
->finish_next
)
3292 sparseset_clear_bit (objects_live
, OBJECT_CONFLICT_ID (r
->object
));
3294 sparseset_free (objects_live
);
3295 compress_conflict_vecs ();
3297 /* Mark some copies for removing and change allocnos in the rest
3299 FOR_EACH_COPY (cp
, ci
)
3301 if (ALLOCNO_CAP_MEMBER (cp
->first
) != NULL
3302 || ALLOCNO_CAP_MEMBER (cp
->second
) != NULL
)
3304 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
3306 (ira_dump_file
, " Remove cp%d:%c%dr%d-%c%dr%d\n",
3307 cp
->num
, ALLOCNO_CAP_MEMBER (cp
->first
) != NULL
? 'c' : 'a',
3308 ALLOCNO_NUM (cp
->first
),
3309 REGNO (allocno_emit_reg (cp
->first
)),
3310 ALLOCNO_CAP_MEMBER (cp
->second
) != NULL
? 'c' : 'a',
3311 ALLOCNO_NUM (cp
->second
),
3312 REGNO (allocno_emit_reg (cp
->second
)));
3313 cp
->loop_tree_node
= NULL
;
3317 = regno_top_level_allocno_map
[REGNO (allocno_emit_reg (cp
->first
))];
3319 = regno_top_level_allocno_map
[REGNO (allocno_emit_reg (cp
->second
))];
3320 node
= cp
->loop_tree_node
;
3322 keep_p
= true; /* It copy generated in ira-emit.cc. */
3325 /* Check that the copy was not propagated from level on
3326 which we will have different pseudos. */
3327 node_first
= node
->regno_allocno_map
[ALLOCNO_REGNO (cp
->first
)];
3328 node_second
= node
->regno_allocno_map
[ALLOCNO_REGNO (cp
->second
)];
3329 keep_p
= ((REGNO (allocno_emit_reg (first
))
3330 == REGNO (allocno_emit_reg (node_first
)))
3331 && (REGNO (allocno_emit_reg (second
))
3332 == REGNO (allocno_emit_reg (node_second
))));
3336 cp
->loop_tree_node
= ira_loop_tree_root
;
3338 cp
->second
= second
;
3342 cp
->loop_tree_node
= NULL
;
3343 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
3344 fprintf (ira_dump_file
, " Remove cp%d:a%dr%d-a%dr%d\n",
3345 cp
->num
, ALLOCNO_NUM (cp
->first
),
3346 REGNO (allocno_emit_reg (cp
->first
)),
3347 ALLOCNO_NUM (cp
->second
),
3348 REGNO (allocno_emit_reg (cp
->second
)));
3351 /* Remove unnecessary allocnos on lower levels of the loop tree. */
3352 FOR_EACH_ALLOCNO (a
, ai
)
3354 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))]
3355 || ALLOCNO_CAP_MEMBER (a
) != NULL
)
3357 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
3358 fprintf (ira_dump_file
, " Remove a%dr%d\n",
3359 ALLOCNO_NUM (a
), REGNO (allocno_emit_reg (a
)));
3360 ira_remove_allocno_prefs (a
);
3364 ALLOCNO_LOOP_TREE_NODE (a
) = ira_loop_tree_root
;
3365 ALLOCNO_REGNO (a
) = REGNO (allocno_emit_reg (a
));
3366 ALLOCNO_CAP (a
) = NULL
;
3367 /* Restore updated costs for assignments from reload. */
3368 ALLOCNO_UPDATED_MEMORY_COST (a
) = ALLOCNO_MEMORY_COST (a
);
3369 ALLOCNO_UPDATED_CLASS_COST (a
) = ALLOCNO_CLASS_COST (a
);
3370 if (! ALLOCNO_ASSIGNED_P (a
))
3371 ira_free_allocno_updated_costs (a
);
3372 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
);
3373 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) == NULL
);
3375 /* Remove unnecessary copies. */
3376 FOR_EACH_COPY (cp
, ci
)
3378 if (cp
->loop_tree_node
== NULL
)
3380 ira_copies
[cp
->num
] = NULL
;
3385 (ALLOCNO_LOOP_TREE_NODE (cp
->first
) == ira_loop_tree_root
3386 && ALLOCNO_LOOP_TREE_NODE (cp
->second
) == ira_loop_tree_root
);
3387 add_allocno_copy_to_list (cp
);
3388 swap_allocno_copy_ends_if_necessary (cp
);
3390 rebuild_regno_allocno_maps ();
3391 if (ira_max_point
!= ira_max_point_before_emit
)
3392 ira_compress_allocno_live_ranges ();
3393 ira_free (regno_top_level_allocno_map
);
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
3426 /* Identify allocnos which prefer a register class with a single hard register.
3427 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3428 less likely to use the preferred singleton register. */
/* NOTE(review): elided, line-mangled extract.  The declarations of
   `a', `index', `min' and `i', the function's braces, and several
   guard/continue lines are not shown; code below is byte-identical,
   only comments are added.  */
3430 update_conflict_hard_reg_costs (void)
3433 ira_allocno_iterator ai
;
3436 FOR_EACH_ALLOCNO (a
, ai
)
/* The allocno's own class versus the class its pseudo prefers.  */
3438 reg_class_t aclass
= ALLOCNO_CLASS (a
);
3439 reg_class_t pref
= reg_preferred_class (ALLOCNO_REGNO (a
));
/* Hard register number if the preferred class has exactly one hard
   register for this mode; the (elided) check of this value presumably
   skips allocnos without a singleton preference -- confirm in the
   full source.  */
3440 int singleton
= ira_class_singleton
[pref
][ALLOCNO_MODE (a
)];
/* Index of that singleton register within ACLASS's hard-reg array.  */
3443 index
= ira_class_hard_reg_index
[(int) aclass
][singleton
];
/* Without both cost vectors there is nothing to adjust.  */
3446 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) == NULL
3447 || ALLOCNO_HARD_REG_COSTS (a
) == NULL
)
/* Find the smallest per-register cost that still exceeds the class
   cost; `min' is initialized in an elided line.  */
3450 for (i
= ira_class_hard_regs_num
[(int) aclass
] - 1; i
>= 0; i
--)
3451 if (ALLOCNO_HARD_REG_COSTS (a
)[i
] > ALLOCNO_CLASS_COST (a
)
3452 && min
> ALLOCNO_HARD_REG_COSTS (a
)[i
])
3453 min
= ALLOCNO_HARD_REG_COSTS (a
)[i
];
/* Make sure the conflict-cost vector exists (remaining arguments of
   the allocation call are elided) ...  */
3456 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a
),
/* ... then lower the conflict cost of the singleton register so that
   conflicting allocnos are steered away from it.  */
3458 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
)[index
]
3459 -= min
- ALLOCNO_CLASS_COST (a
);
3463 /* Create an internal representation (IR) for IRA (allocnos, copies,
3464 loop tree nodes). The function returns TRUE if we generate loop
3465 structure (besides nodes representing all function and the basic
3466 blocks) for regional allocation. A true return means that we
3467 really need to flatten IR before the reload. */
/* NOTE(review): elided, line-mangled extract.  The function header
   (presumably `bool ira_build (void)'), braces, the declarations of
   `loops_p', `n', `nr', `nr_big', `r' and the final `return' are not
   shown; code below is byte-identical, only comments are added.  */
/* Set up the cost-vector and allocno allocators, then the loop tree.  */
3474 initiate_cost_vectors ();
3475 initiate_allocnos ();
3478 create_loop_tree_nodes ();
/* Build objects and live ranges, drop regions not worth regional
   allocation, and compact the remaining live-range points.  */
3482 create_allocno_objects ();
3483 ira_create_allocno_live_ranges ();
3484 remove_unnecessary_regions (false);
3485 ira_compress_allocno_live_ranges ();
3486 update_bad_spill_attribute ();
/* True when more than the root region survives, i.e. we really did
   regional allocation and must flatten before reload.  */
3487 loops_p
= more_one_region_p ();
3490 propagate_allocno_info ();
3493 ira_tune_allocno_costs ();
3494 #ifdef ENABLE_IRA_CHECKING
3495 check_allocno_creation ();
/* Build the conflict graph and derived cost adjustments.  */
3497 setup_min_max_allocno_live_range_point ();
3498 sort_conflict_id_map ();
3499 setup_min_max_conflict_allocno_ids ();
3500 ira_build_conflicts ();
3501 update_conflict_hard_reg_costs ();
/* Fast-allocation fallback path: without a conflict graph we keep only
   the root region and conservatively conflict call-crossing allocnos
   with the caller-save registers.  */
3502 if (! ira_conflicts_p
)
3505 ira_allocno_iterator ai
;
3507 /* Remove all regions but root one. */
3510 remove_unnecessary_regions (true);
3513 /* We don't save hard registers around calls for fast allocation
3514 -- add caller clobbered registers as conflicting ones to
3515 allocno crossing calls. */
3516 FOR_EACH_ALLOCNO (a
, ai
)
3517 if (ALLOCNO_CALLS_CROSSED_NUM (a
) != 0)
3518 ior_hard_reg_conflicts (a
, ira_need_caller_save_regs (a
));
/* Optional dump output, gated on verbosity level.  */
3520 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
3521 print_copies (ira_dump_file
);
3522 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
3523 print_prefs (ira_dump_file
);
3524 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
3529 ira_allocno_iterator ai
;
/* Gather statistics: conflict count `n' and (in elided lines) live
   range count `nr' / `nr_big'; initializations are not shown here.  */
3534 FOR_EACH_ALLOCNO (a
, ai
)
3536 int j
, nobj
= ALLOCNO_NUM_OBJECTS (a
);
3540 for (j
= 0; j
< nobj
; j
++)
3542 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
3543 n
+= OBJECT_NUM_CONFLICTS (obj
);
/* Loop body (presumably `nr++') is elided from this extract.  */
3544 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
3548 fprintf (ira_dump_file
, " regions=%d, blocks=%d, points=%d\n",
3549 current_loops
== NULL
? 1 : number_of_loops (cfun
),
3550 n_basic_blocks_for_fn (cfun
), ira_max_point
);
3551 fprintf (ira_dump_file
,
3552 " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3553 ira_allocnos_num
, nr_big
, ira_copies_num
, n
, nr
);
3558 /* Release the data created by function ira_build. */
3562 finish_loop_tree_nodes ();
3566 finish_cost_vectors ();
3567 ira_finish_allocno_live_ranges ();