/* Building internal representation for IRA.
   Copyright (C) 2006-2018 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "insn-config.h"
#include "sparseset.h"
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx_insn *,
                                     ira_loop_tree_node_t);
/* The root of the loop tree corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;
/* All nodes representing basic blocks are referred through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;
/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;
/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;
/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;
/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;
/* Size of the previous array.  */
int ira_allocnos_num;
/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;
/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;
/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;
/* Size of the previous array.  */
int ira_prefs_num;
/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;
/* Size of the previous array.  */
int ira_copies_num;
/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds to the whole function) will not be
   allocated, but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node)
                     * last_basic_block_for_fn (cfun)));
  last_basic_block_before_change = last_basic_block_for_fn (cfun);
  for (i = 0; i < (unsigned int) last_basic_block_for_fn (cfun); i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
              sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
                        ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
                    ira_allocate (sizeof (struct ira_loop_tree_node)
                                  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
        {
          ira_loop_nodes[i].regno_allocno_map = NULL;
          skip_p = false;
          FOR_EACH_EDGE (e, ei, loop->header->preds)
            if (e->src != loop->latch
                && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
          edges = get_loop_exit_edges (loop);
          FOR_EACH_VEC_ELT (edges, j, e)
            if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          edges.release ();
          if (skip_p)
            continue;
        }
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
/* The function returns TRUE if there are more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
          && ira_loop_tree_root != &ira_loop_nodes[i])
        return true;
  return false;
}
/* Free the loop tree node of a loop.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}
/* Free the loop tree nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
        ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
        ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we add the
   loop designating the whole function (used when CFG loops are not
   built).  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added the loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
        parent = NULL;
      else
        {
          for (parent = loop_outer (loop);
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      if (parent == NULL)
        {
          loop_node->next = NULL;
          loop_node->subloop_next = NULL;
          loop_node->parent = NULL;
        }
      else
        {
          parent_node = &ira_loop_nodes[parent->num];
          loop_node->next = parent_node->children;
          parent_node->children = loop_node;
          loop_node->subloop_next = parent_node->subloops;
          parent_node->subloops = loop_node;
          loop_node->parent = parent_node;
        }
    }
}
/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns the maximal value of level in the tree + 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
        max_height = height;
    }
  return max_height;
}
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
        parent = NULL;
      else
        {
          for (parent = bb->loop_father;
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
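/* For intuition (an illustrative sketch, not a real dump): for a CFG
   bb0 -> loop1 { bb1, bb2 } -> bb3 where loop1 received a loop node,
   the resulting tree is

     root (loop 0): children = bb0, bb3, loop1; subloops = loop1
       loop1: children = bb1, bb2

   i.e. `children' chains both basic block and loop nodes while
   `subloops' chains loop nodes only.  */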
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
        ira_free (ira_loop_nodes[l].regno_allocno_map);
        ira_loop_nodes[l].regno_allocno_map
          = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                            * max_regno);
        memset (ira_loop_nodes[l].regno_allocno_map, 0,
                sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps.  */
        continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle.  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
}
/* Pools for allocnos, allocno live ranges and objects.  */
static object_allocator<live_range> live_range_pool ("live ranges");
static object_allocator<ira_allocno> allocno_pool ("allocnos");
static object_allocator<ira_object> object_pool ("objects");

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;
/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
/* Create and return an object corresponding to a new allocno A.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = object_pool.allocate ();

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
                    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = allocno_pool.allocate ();
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle on region borders (see
           ira-emit.c).  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
  CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_WMODE (a) = ALLOCNO_MODE (a);
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                              reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                              reg_class_contents[aclass]);
    }
}
/* Determine the number of objects we should associate with allocno A
   and allocate them.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;

  if (n != 2 || maybe_ne (GET_MODE_SIZE (mode), n * UNITS_PER_WORD))
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
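/* As an illustrative example (not from the sources): on a 32-bit
   target, a DImode allocno whose class needs exactly two word-sized
   hard registers gets n == 2, one object per word, so conflicts can
   be tracked per subword; any other mode/class combination falls
   back to a single object.  */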
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
/* Merge hard register conflict information for all objects associated
   with allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
                          bool total_only)
{
  int i;

  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
        IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
                          OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
                        OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
/* Update hard register conflict information for all objects associated
   with A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}
/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nw;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such case because it does not result
       in allocation.  */
    return false;

  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
          < 3 * nw * sizeof (IRA_INT_TYPE));
}
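/* A worked instance of the trade-off above, assuming 32-bit
   IRA_INT_TYPE and 8-byte pointers: OBJECT_MIN == 0 and
   OBJECT_MAX == 127 give nw == 4, so the bit vector is weighted at
   3 * 4 * 4 == 48.  A vector of NUM == 10 conflicts weighs
   2 * 8 * 11 == 176, so the bit vector wins; with NUM == 1 the
   vector weighs 2 * 8 * 2 == 32 < 48 and is chosen instead.  */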
/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}
/* Allocate and initialize the conflict bit vector of OBJ.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
          / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
/* Allocate and initialize the conflict vector or conflict bit vector
   of OBJ for NUM conflicting allocnos whatever is more profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}
/* Add OBJ2 to the conflicts of OBJ1.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned int size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);

      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
        {
          ira_object_t *newvec;

          size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
          newvec = (ira_object_t *) ira_allocate (size);
          memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
          ira_free (vec);
          vec = newvec;
          OBJECT_CONFLICT_ARRAY (obj1) = vec;
          OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
        }
      vec[curr_num] = obj2;
      vec[curr_num + 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
        {
          /* Expand head of the bit vector.  */
          added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
          nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
            {
              memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                       vec, nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
            }
          else
            {
              size
                = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
              memset ((char *) vec
                      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
                      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
        }
      else if (OBJECT_MAX (obj1) < id)
        {
          nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = nw * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
            {
              /* Expand tail of the bit vector.  */
              size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1),
                      OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
                      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MAX (obj1) = id;
        }
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
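/* Illustration of the head growth above, with made-up numbers and
   assuming 32-bit IRA_INT_TYPE: if the vector currently covers
   conflict ids 64..127 (nw == 2) and a conflict with id 10 arrives,
   added_head_nw == (64 - 10 - 1) / 32 + 1 == 2 words are prepended,
   OBJECT_MIN drops by 64, and the old words are shifted up in place
   when the existing allocation suffices, else a 3/2-grown array is
   allocated and the data copied.  */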
/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
/* Clear all conflicts of OBJ.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocation presence in conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
/* Remove duplications in conflict vector of OBJ.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
        {
          conflict_check[id] = curr_conflict_check_tick;
          vec[j++] = conflict_obj;
        }
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}
/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    if (OBJECT_CONFLICT_VEC_P (obj))
      compress_conflict_vec (obj);
  ira_free (conflict_check);
}
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
   parent loop.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  ALLOCNO_WMODE (cap) = ALLOCNO_WMODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap),
                    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
/* Create and return a live range for OBJECT with given attributes.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
                       live_range_t next)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}
/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;

  p = ira_create_live_range (object, start, finish,
                             OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}
/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = live_range_pool.allocate ();
  *p = *r;
  return p;
}
/* Copy allocno live range list given by its head R and return the
   result.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
        first = p;
      else
        last->next = p;
      last = p;
    }
  return first;
}
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      if (r1->start < r2->start)
        std::swap (r1, r2);
      if (r1->start <= r2->finish + 1)
        {
          /* Intersected ranges: merge r1 and r2 into r1.  */
          r1->start = r2->start;
          if (r1->finish < r2->finish)
            r1->finish = r2->finish;
          live_range_t temp = r2;
          r2 = r2->next;
          ira_finish_live_range (temp);
          if (r2 == NULL)
            {
              /* To try to merge with subsequent ranges in r1.  */
              r2 = r1->next;
              r1->next = NULL;
            }
        }
      else
        {
          /* Add r1 to the result.  */
          if (first == NULL)
            first = last = r1;
          else
            {
              last->next = r1;
              last = r1;
            }
          r1 = r1->next;
          if (r1 == NULL)
            {
              /* To try to merge with subsequent ranges in r2.  */
              r1 = r2->next;
              r2->next = NULL;
            }
        }
    }
  if (r1 != NULL)
    {
      if (first == NULL)
        first = r1;
      else
        last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
        first = r2;
      else
        last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
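/* Small worked cases for the merge above: [1,5] and [4,10] intersect
   (4 <= 5 + 1) and collapse to [1,10]; adjacent ranges [1,5] and
   [6,10] collapse too because 6 == 5 + 1; [1,5] and [8,10] remain two
   chained ranges.  */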
/* Return TRUE if live ranges R1 and R2 intersect.  */
bool
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
{
  /* Remember the live ranges are always kept ordered.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->start > r2->finish)
        r1 = r1->next;
      else if (r2->start > r1->finish)
        r2 = r2->next;
      else
        return true;
    }
  return false;
}
/* Free allocno live range R.  */
void
ira_finish_live_range (live_range_t r)
{
  live_range_pool.remove (r);
}
/* Free list of allocno live ranges starting with R.  */
void
ira_finish_live_range_list (live_range_t r)
{
  live_range_t next_r;

  for (; r != NULL; r = next_r)
    {
      next_r = r->next;
      ira_finish_live_range (r);
    }
}
/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
        ira_free (OBJECT_CONFLICT_ARRAY (obj));
      object_pool.remove (obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  allocno_pool.remove (a);
}
/* Free the memory allocated for all allocnos.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  allocno_pool.release ();
  object_pool.release ();
  live_range_pool.release ();
}
/* Pool for allocno preferences.  */
static object_allocator<ira_allocno_pref> pref_pool ("prefs");

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;
/* The function initializes data concerning allocno prefs.  */
static void
initiate_prefs (void)
{
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}
/* Return pref for A and HARD_REGNO if any.  */
static ira_pref_t
find_allocno_pref (ira_allocno_t a, int hard_regno)
{
  ira_pref_t pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    if (pref->allocno == a && pref->hard_regno == hard_regno)
      return pref;
  return NULL;
}
/* Create and return pref with given attributes A, HARD_REGNO, and FREQ.  */
ira_pref_t
ira_create_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  pref = pref_pool.allocate ();
  pref->num = ira_prefs_num;
  pref->allocno = a;
  pref->hard_regno = hard_regno;
  pref->freq = freq;
  pref_vec.safe_push (pref);
  ira_prefs = pref_vec.address ();
  ira_prefs_num = pref_vec.length ();
  return pref;
}
/* Attach a pref PREF to the corresponding allocno.  */
static void
add_allocno_pref_to_list (ira_pref_t pref)
{
  ira_allocno_t a = pref->allocno;

  pref->next_pref = ALLOCNO_PREFS (a);
  ALLOCNO_PREFS (a) = pref;
}
/* Create (or update frequency if the pref already exists) the pref of
   allocno A preferring HARD_REGNO with frequency FREQ.  */
void
ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  if (freq <= 0)
    freq = 1;
  if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
    {
      pref->freq += freq;
      return;
    }
  pref = ira_create_pref (a, hard_regno, freq);
  ira_assert (a != NULL);
  add_allocno_pref_to_list (pref);
}
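/* For example, recording hard register 0 twice for the same allocno
   with frequencies 100 and 50 leaves a single pref node with
   freq == 150 instead of two list entries; a non-positive FREQ is
   bumped to 1 before being accumulated.  */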
/* Print info about PREF into file F.  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
           ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
           pref->hard_regno, pref->freq);
}
/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}
/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}
/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}
/* Print info about prefs involving allocno A into file F.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}
/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}
/* The function frees memory allocated for PREF.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pref_pool.remove (pref);
}
/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
             pref->num, pref->hard_regno, pref->freq);
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}
/* Remove all prefs of allocno A.  */
void
ira_remove_allocno_prefs (ira_allocno_t a)
{
  ira_pref_t pref, next_pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
    {
      next_pref = pref->next_pref;
      finish_pref (pref);
    }
  ALLOCNO_PREFS (a) = NULL;
}
/* Free memory allocated for all prefs.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  pref_pool.release ();
}
/* Pool for copies.  */
static object_allocator<ira_allocno_copy> copy_pool ("copies");

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;
/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx_insn *insn,
                   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a1)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
          && cp->loop_tree_node == loop_tree_node)
        return cp;
    }
  return NULL;
}
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                 bool constraint_p, rtx_insn *insn,
                 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = copy_pool.allocate ();
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}
/* Attach a copy CP to allocnos involved into the copy.  */
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
        cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
        cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
        cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
        cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
/* Make a copy CP a canonical copy where number of the
   first allocno is less than the second one.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  std::swap (cp->first, cp->second);
  std::swap (cp->prev_first_allocno_copy, cp->prev_second_allocno_copy);
  std::swap (cp->next_first_allocno_copy, cp->next_second_allocno_copy);
}
/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                      bool constraint_p, rtx_insn *insn,
                      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
                        loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  add_allocno_copy_to_list (cp);
  swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}
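/* For example, if the same move insn is processed twice for one
   region, the second call finds the existing copy via
   find_allocno_copy and merely adds FREQ to it, so no duplicate copy
   node is created.  */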
/* Print info about copy CP into file F.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
           ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
           ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
           cp->insn != NULL
           ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}

/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
/* Print info about copies involving allocno A into file F.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
               ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  copy_pool.remove (cp);
}
/* Free memory allocated for all copies.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  copy_pool.release ();
}
/* Pools for cost vectors.  It is defined only for allocno classes.  */
static pool_allocator *cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each allocno class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass] = new pool_allocator
        ("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
    }
}
/* Allocate and return a cost vector VEC for ACLASS.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) cost_vector_pool[(int) aclass]->allocate ();
}
/* Free a cost vector VEC for ACLASS.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  cost_vector_pool[(int) aclass]->remove (vec);
}
/* Finish work with hard register cost vectors.  Release allocation
   pool for each allocno class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      delete cost_vector_pool[aclass];
    }
}
/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbors in the topsort.  */
static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
                                  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      auto_vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
         the flag to mark blocks we still have to visit to add them to
         our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
        {
          gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
          subloop_node->bb->flags |= BB_TO_VISIT;
        }

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
        {
          if (! (subloop_node->bb->flags & BB_TO_VISIT))
            continue;

          subloop_node->bb->flags &= ~BB_TO_VISIT;
          dfs_stack.quick_push (subloop_node);
          while (! dfs_stack.is_empty ())
            {
              edge e;
              edge_iterator ei;

              ira_loop_tree_node_t n = dfs_stack.last ();
              FOR_EACH_EDGE (e, ei, n->bb->preds)
                {
                  ira_loop_tree_node_t pred_node;
                  basic_block pred_bb = e->src;

                  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                    continue;

                  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
                  if (pred_node != n
                      && (pred_node->bb->flags & BB_TO_VISIT))
                    {
                      pred_node->bb->flags &= ~BB_TO_VISIT;
                      dfs_stack.quick_push (pred_node);
                    }
                }
              if (n == dfs_stack.last ())
                {
                  dfs_stack.pop ();
                  topsort_nodes.quick_push (n);
                }
            }
        }

#undef BB_TO_VISIT
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
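/* A sketch of the effect (hypothetical block names): for a diamond
   bb1 -> { bb2, bb3 } -> bb4 given in pre-order [bb1, bb2, bb3, bb4],
   the DFS starts from bb4 and climbs predecessor edges, so the
   reverse of the computed post-order visits each block close to its
   CFG successors, keeping live range chains short.  */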
/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE are also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
                        void (*preorder_func) (ira_loop_tree_node_t),
                        void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      auto_vec<ira_loop_tree_node_t> loop_preorder;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
         is set up such that nodes in the loop body appear in a pre-order
         of their place in the CFG.  */
      for (subloop_node = loop_node->children;
           subloop_node != NULL;
           subloop_node = subloop_node->next)
        if (subloop_node->bb != NULL)
          loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
        FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
          (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
        {
          vec<ira_loop_tree_node_t> loop_rev_postorder =
            ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
          FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
            (*postorder_func) (subloop_node);
          loop_rev_postorder.release ();
        }
    }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
                              preorder_func, postorder_func);
    }

  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
/* The basic block currently being processed.  */
static basic_block curr_bb;
/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  OUTER corresponds to the parent expression of X.  */
static void
create_insn_allocnos (rtx x, rtx outer, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
        {
          ira_allocno_t a;

          if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
            {
              a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
              if (outer != NULL && GET_CODE (outer) == SUBREG)
                {
                  machine_mode wmode = GET_MODE (outer);
                  if (partial_subreg_p (ALLOCNO_WMODE (a), wmode))
                    ALLOCNO_WMODE (a) = wmode;
                }
            }

          ALLOCNO_NREFS (a)++;
          ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
          if (output_p)
            bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
        }
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), NULL, true);
      create_insn_allocnos (SET_SRC (x), NULL, false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      return;
    }
  else if (code == CLOBBER_HIGH)
    {
      gcc_assert (REG_P (XEXP (x, 0)) && HARD_REGISTER_P (XEXP (x, 0)));
      return;
    }
  else if (code == MEM)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
           code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      create_insn_allocnos (XEXP (x, 0), NULL, true);
      create_insn_allocnos (XEXP (x, 0), NULL, false);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        create_insn_allocnos (XEXP (x, i), x, output_p);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          create_insn_allocnos (XVECEXP (x, i, j), x, output_p);
    }
}
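/* For a simple insn pattern such as (set (reg 130) (plus (reg 131)
   (reg 132))) -- register numbers made up -- the SET arm recurses
   into the destination with OUTPUT_P true and into the source with
   OUTPUT_P false, so r130 is added to modified_regnos while r131 and
   r132 only get their NREFS/FREQ counters bumped.  */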
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx_insn *insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), NULL, false);
  /* It might be an allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
                             FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
        if (ira_curr_regno_allocno_map[i] == NULL)
          {
            /* The order of creations is important for right
               ira_regno_allocno_map.  */
            if ((parent = ira_curr_loop_tree_node->parent) != NULL
                && parent->regno_allocno_map[i] == NULL)
              ira_create_allocno (i, false, parent);
            ira_create_allocno (i, false, ira_curr_loop_tree_node);
          }
        bitmap_set_bit (border_allocnos,
                        ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      vec<edge> edges;

      ira_assert (current_loops != NULL);
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
        if (e->src != loop_node->loop->latch)
          create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
        create_loop_allocnos (e);
      edges.release ();
    }
}
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
                   loop_tree_node->modified_regnos);
}
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
          && (parent_a = parent->regno_allocno_map[i]) != NULL
          /* There are no caps yet at this point.  So use
             border_allocnos to find allocnos for the propagation.  */
          && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
                           ALLOCNO_NUM (a)))
        {
          if (! ALLOCNO_BAD_SPILL_P (a))
            ALLOCNO_BAD_SPILL_P (parent_a) = false;
          ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
          ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
          ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
          merge_hard_reg_conflicts (a, parent_a, true);
          ALLOCNO_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CALLS_CROSSED_NUM (a);
          ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
          IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
                            ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
          ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
            += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
          aclass = ALLOCNO_CLASS (a);
          ira_assert (aclass == ALLOCNO_CLASS (parent_a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
             ALLOCNO_HARD_REG_COSTS (a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
             aclass,
             ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
          ALLOCNO_CLASS_COST (parent_a)
            += ALLOCNO_CLASS_COST (a);
          ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
        }
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
                          create_loop_tree_node_allocnos, NULL);
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
                            propagate_modified_regnos);
}
/* The page contains functions to remove some regions from a separate
   register allocation.  We remove regions whose separate allocation
   will hardly improve the result.  As a result we speed up regional
   register allocation.  */
/* The function changes the object in range list given by R to OBJ.  */
static void
change_object_in_range_list (live_range_t r, ira_object_t obj)
{
  for (; r != NULL; r = r->next)
    r->object = obj;
}
/* Move all live ranges associated with allocno FROM to allocno TO.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file,
                   "      Moving ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
/* Return TRUE if NODE represents a loop with low register
   pressure.  */
static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
  int i;
  enum reg_class pclass;

  if (node->bb != NULL)
    return false;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
	  && ira_class_hard_regs_num[pclass] > 1)
	return false;
    }
  return true;
}
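
/* The test above treats a loop as low pressure when, for every
   pressure class with more than one hard register, the recorded
   pressure fits in the available hard registers.  A self-contained
   toy illustration with hypothetical stand-in arrays (not the GCC
   data structures); the block is illustration only and is not
   compiled.  */
#if 0
#include <stdbool.h>

#define TOY_N_CLASSES 2

static const int toy_pressure[TOY_N_CLASSES]  = { 5, 3 }; /* max live regs */
static const int toy_hard_regs[TOY_N_CLASSES] = { 8, 1 }; /* regs available */

static bool
toy_low_pressure_p (void)
{
  for (int i = 0; i < TOY_N_CLASSES; i++)
    /* Classes with a single hard register are ignored, as above.  */
    if (toy_pressure[i] > toy_hard_regs[i] && toy_hard_regs[i] > 1)
      return false;
  return true;
}
#endif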
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such a loop if the target uses stack registers
   because reg-stack.c cannot deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
#endif
/* Sort loops for marking them for removal.  We put already marked
   loops first, then less frequent loops next, and then outer loops
   next.  */
static int
loop_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;

  ira_assert (l1->parent != NULL && l2->parent != NULL);
  if (l1->to_remove_p && ! l2->to_remove_p)
    return -1;
  if (! l1->to_remove_p && l2->to_remove_p)
    return 1;
  if ((diff = l1->loop->header->count.to_frequency (cfun)
	      - l2->loop->header->count.to_frequency (cfun)) != 0)
    return diff;
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
    return diff;
  /* Make sorting stable.  */
  return l1->loop_num - l2->loop_num;
}
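
/* A self-contained sketch of the multi-key qsort comparator pattern
   used by loop_compare_func above: compare by the primary key, fall
   through to secondary keys on ties, and end with a unique id so the
   order is deterministic (qsort itself is not stable).  After sorting,
   the front of the array holds the best removal candidates.  The toy
   struct is hypothetical; this block is not compiled.  */
#if 0
#include <stdlib.h>

struct toy_loop { int marked, freq, depth, id; };

static int
toy_loop_cmp (const void *v1p, const void *v2p)
{
  const struct toy_loop *l1 = (const struct toy_loop *) v1p;
  const struct toy_loop *l2 = (const struct toy_loop *) v2p;
  int diff;

  if (l1->marked != l2->marked)
    return l2->marked - l1->marked;	/* already marked loops first */
  if ((diff = l1->freq - l2->freq) != 0)
    return diff;			/* less frequent loops next */
  if ((diff = l1->depth - l2->depth) != 0)
    return diff;			/* outer (shallower) loops next */
  return l1->id - l2->id;		/* deterministic order on ties */
}

/* Usage: qsort (loops, n, sizeof (struct toy_loop), toy_loop_cmp);  */
#endif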
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   low register pressure.  In this case a separate allocation of the
   loop hardly helps (for irregular register file architectures it
   could help by choosing a better hard register in the loop, but we
   prefer faster allocation even in this case).  We also remove cheap
   loops if there are more than IRA_MAX_LOOPS_NUM of them.  Loops with
   EH enter or exit edges are removed too because the allocation might
   require putting pseudo moves on the EH edges (we could still do
   this for pseudos with caller-saved hard registers in some cases,
   but it is impossible to say here or during the top-down allocation
   pass what hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; i < n - IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->count.to_frequency (cfun),
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
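
/* A sketch of the capping step above: after sorting so that the best
   removal candidates come first, every loop beyond a fixed budget is
   marked for removal.  TOY_MAX_KEPT and the flag array are
   hypothetical stand-ins for IRA_MAX_LOOPS_NUM and to_remove_p; the
   block is illustration only and is not compiled.  */
#if 0
#include <stdbool.h>

#define TOY_MAX_KEPT 4

static void
toy_trim_to_budget (bool *to_remove_p, int n)
{
  /* Everything before index n - TOY_MAX_KEPT goes, so at most
     TOY_MAX_KEPT loops survive.  */
  for (int i = 0; i < n - TOY_MAX_KEPT; i++)
    to_remove_p[i] = true;
}
#endif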
/* Mark all loops but the root for removal.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop_num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->count.to_frequency (cfun),
	     loop_depth (ira_loop_nodes[i].loop));
      }
}
/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Vec containing references to all children of loop tree nodes.  */
static vec<ira_loop_tree_node_t> children_vec;
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  start = children_vec.length ();
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      removed_loop_vec.safe_push (node);
      return;
    }
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
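
/* A sketch of the stack trick used above: surviving children are
   pushed onto a vector in traversal order, then popped (reversing
   them) and prepended to a singly linked list (reversing them again),
   so the original order is preserved without a tail pointer.  Toy
   types, hypothetical names; illustration only, not compiled.  */
#if 0
#include <stddef.h>

struct toy_node { struct toy_node *next; };

/* STACK[START..TOP-1] holds children in traversal order; relink them
   into a list that preserves that order.  */
static struct toy_node *
toy_relink (struct toy_node **stack, int top, int start)
{
  struct toy_node *head = NULL;

  while (top > start)
    {
      struct toy_node *n = stack[--top];
      n->next = head;		/* prepend: undoes the pop reversal */
      head = n;
    }
  return head;
}
#endif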
/* Return TRUE if NODE is inside PARENT.  */
static bool
loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
{
  for (node = node->parent; node != NULL; node = node->parent)
    if (node == parent)
      return true;
  return false;
}
/* Sort allocnos according to their order in the regno allocno list.  */
static int
regno_allocno_order_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
  ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);

  if (loop_is_inside_p (n1, n2))
    return -1;
  else if (loop_is_inside_p (n2, n1))
    return 1;
  /* If allocnos are equally good, sort by allocno numbers, so that
     the results of qsort leave nothing to chance.  We put allocnos
     with higher numbers first in the list because it is the original
     order for allocnos from loops on the same levels.  */
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}
/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  */
static ira_allocno_t *regno_allocnos;

/* Restore allocno order for REGNO in the regno allocno list.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "  Rebuilding regno allocno list for %d\n", regno);
}
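
/* A sketch of the relinking step above: once the nodes are sorted
   into an array, the singly linked list is rebuilt by pointing each
   element at its successor and terminating the last one.  Toy type,
   hypothetical name; assumes n > 0 as the caller above guarantees;
   illustration only, not compiled.  */
#if 0
#include <stddef.h>

struct toy_elt { struct toy_elt *next; };

static struct toy_elt *
toy_relink_sorted (struct toy_elt **arr, int n)
{
  for (int i = 1; i < n; i++)
    arr[i - 1]->next = arr[i];
  arr[n - 1]->next = NULL;
  return arr[0];		/* new list head */
}
#endif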
/* Propagate info from allocno FROM_A to allocno A.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
		    ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a));
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}
/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     the upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of the allocno
		     in the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of a subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	{
	  /* We need to restore the order in the regno allocno list.  */
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of the allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
/* Remove loops from consideration.  We remove all loops except for
   the root if ALL_P, or loops for which a separate allocation will
   not improve the result.  We have to do this after allocno creation
   and the evaluation of their costs and allocno classes, because only
   after that can the register pressure be known and calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  children_vec.create (last_basic_block_for_fn (cfun)
		       + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block_for_fn (cfun)
			   + number_of_loops (cfun));
  remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}


/* At this point a true value of the allocno attribute bad_spill_p
   means that there is an insn where the allocno occurs and where the
   allocno cannot be used as memory.  The function updates the
   attribute so that it can now be true only for allocnos which cannot
   be used as memory in an insn and in whose live ranges there are
   other allocno deaths.  Spilling allocnos with a true value will not
   improve the code, because it will not make other allocnos
   colorable, and additional reloads for the corresponding pseudo will
   be generated in the reload pass for each insn in which it occurs.

   This is a trick mentioned in the classic article of Chaitin et al.
   which is frequently omitted in other implementations of RA based on
   priority coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  if (r != NULL)
	    break;
	}
      if (r == NULL)
	ALLOCNO_BAD_SPILL_P (a) = false;
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
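
/* A self-contained sketch of the dead-points bookkeeping above: first
   record every point where some live range finishes, then test whether
   another allocno dies strictly inside a given range.  A plain bool
   array stands in for the per-class bitmaps, and the names are
   hypothetical; illustration only, not compiled.  */
#if 0
#include <stdbool.h>

#define TOY_MAX_POINT 16

static bool toy_dead_point[TOY_MAX_POINT];

static void
toy_record_death (int finish)
{
  toy_dead_point[finish] = true;
}

/* True if some range dies strictly inside (start, finish).  */
static bool
toy_death_inside_p (int start, int finish)
{
  for (int i = start + 1; i < finish; i++)
    if (toy_dead_point[i])
      return true;
  return false;
}
#endif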


/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      {
		/* The object is not used and hence does not live.  */
		ira_assert (OBJECT_LIVE_RANGES (obj) == NULL);
		OBJECT_MAX (obj) = 0;
		OBJECT_MIN (obj) = 1;
		continue;
	      }
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		for (cap = ALLOCNO_CAP (a);
		     cap != NULL;
		     cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((OBJECT_MIN (obj) >= 0 && OBJECT_MIN (obj) <= ira_max_point)
	  && (OBJECT_MAX (obj) >= 0 && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
/* Sort allocnos according to their live ranges.  Allocnos with a
   smaller allocno class are put first unless we use priority
   coloring.  Allocnos with the same class are ordered according to
   their start (min).  Allocnos with the same start are ordered
   according to their finish (max).  */
static int
object_range_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_object_t obj1 = *(const ira_object_t *) v1p;
  ira_object_t obj2 = *(const ira_object_t *) v2p;
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);

  if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
    return diff;
  if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
    return diff;
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
/* Sort ira_object_id_map and set up the conflict ids of allocnos.  */
static void
sort_conflict_id_map (void)
{
  int i, num;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	ira_object_id_map[num++] = obj;
    }
  qsort (ira_object_id_map, num, sizeof (ira_object_t),
	 object_range_compare_func);
  for (i = 0; i < num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      gcc_assert (obj != NULL);
      OBJECT_CONFLICT_ID (obj) = i;
    }
  for (i = num; i < ira_objects_num; i++)
    ira_object_id_map[i] = NULL;
}
/* Set up the minimal and maximal conflict ids of allocnos with which
   a given allocno can conflict.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocnos with smaller ids will
	     also be skipped because of the secondary sorting by range
	     finishes (see object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	  if (min == i)
	    /* We could increase min further in this case but it is
	       good enough.  */
	    min = i - 1;
	}
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* When filling LAST_LIVED, we can go beyond A's range finish to
	 recognize intersection quickly: if the range finish of a
	 subsequently processed allocno (which has a smaller conflict
	 id) lies beyond A's range finish, the two ranges definitely
	 intersect, because allocnos with bigger conflict ids have
	 range starts no smaller than allocnos with smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra
     conflicts in subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
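
/* A self-contained sketch of the id-window idea above: with intervals
   numbered in order of non-decreasing start, interval I cannot
   conflict with any interval whose finish lies before I's start, so a
   single forward scan yields a conservative lower bound on the ids
   worth testing.  Hypothetical toy arrays; illustration only, not
   compiled.  */
#if 0
static void
toy_min_conflict_ids (const int *start, const int *finish, int n,
		      int *min_id)
{
  int first_not_finished = 0;

  for (int i = 0; i < n; i++)
    {
      /* Skip intervals that are already finished before interval I
	 starts; everything before FIRST_NOT_FINISHED is irrelevant.  */
      while (first_not_finished < i
	     && start[i] > finish[first_not_finished])
	first_not_finished++;
      min_id[i] = first_not_finished;
    }
}
#endif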
/* Create caps representing allocnos living only inside non-root loop
   tree nodes.  */
static void
create_caps (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	create_cap_allocno (a);
      else if (ALLOCNO_CAP (a) == NULL)
	{
	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (!bitmap_bit_p (loop_tree_node->border_allocnos,
			     ALLOCNO_NUM (a)))
	    create_cap_allocno (a);
	}
    }
}


/* This page contains code transforming a more-than-one-region
   internal representation (IR) into a one-region IR, which is
   necessary for reload.  This transformation is called IR flattening.
   We might just rebuild the IR for one region, but we don't do it
   because it takes a lot of time.  */

/* Map: regno -> allocno which will finally represent the regno in the
   one-region IR.  */
static ira_allocno_t *regno_top_level_allocno_map;
/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
ira_allocno_t
ira_parent_allocno (ira_allocno_t a)
{
  ira_loop_tree_node_t parent;

  if (ALLOCNO_CAP (a) != NULL)
    return NULL;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  if (parent == NULL)
    return NULL;

  return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
}
/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  If ALLOCNO_CAP is set for A, return that.  */
ira_allocno_t
ira_parent_or_cap_allocno (ira_allocno_t a)
{
  if (ALLOCNO_CAP (a) != NULL)
    return ALLOCNO_CAP (a);

  return ira_parent_allocno (a);
}
/* Process all allocnos originating from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a),
			ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
/* Flatten the IR.  In other words, this function transforms the IR as
   if it were built with one region (without loops).  We could make it
   much simpler by rebuilding the IR with one region, but unfortunately
   that takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they will never
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			   OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO
						   (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with
			 itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removal and change allocnos in the remaining
     copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* The copy was generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from a level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}
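
/* A self-contained sketch of the scan-line conflict rebuild used in
   ira_flattening above: walk program points in order, keep the set of
   currently live objects, and record a conflict between each object
   starting at a point and everything already live.  A toy fixed-size
   flag array replaces the sparseset, and the data is hypothetical;
   illustration only, not compiled.  */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define TOY_N_OBJS 3

/* Toy ranges: object 0 lives [0,4], 1 lives [2,6], 2 lives [5,8].  */
static const int toy_start[TOY_N_OBJS]  = { 0, 2, 5 };
static const int toy_finish[TOY_N_OBJS] = { 4, 6, 8 };

int
main (void)
{
  bool live[TOY_N_OBJS] = { false, false, false };

  for (int p = 0; p <= 8; p++)
    {
      for (int i = 0; i < TOY_N_OBJS; i++)
	if (toy_start[i] == p)
	  {
	    for (int j = 0; j < TOY_N_OBJS; j++)
	      if (live[j])
		printf ("conflict %d-%d\n", j, i);  /* prints 0-1, 1-2 */
	    live[i] = true;
	  }
      for (int i = 0; i < TOY_N_OBJS; i++)
	if (toy_finish[i] == p)
	  live[i] = false;
    }
  return 0;
}
#endif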
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
/* Identify allocnos which prefer a register class with a single hard register.
   Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
   less likely to use the preferred singleton register.  */
static void
update_conflict_hard_reg_costs (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  int i, index, min;

  FOR_EACH_ALLOCNO (a, ai)
    {
      reg_class_t aclass = ALLOCNO_CLASS (a);
      reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
      int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
      if (singleton < 0)
	continue;
      index = ira_class_hard_reg_index[(int) aclass][singleton];
      if (index < 0)
	continue;
      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
	continue;
      min = INT_MAX;
      for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
      if (min == INT_MAX)
	continue;
      ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				  aclass, 0);
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
	-= min - ALLOCNO_CLASS_COST (a);
    }
}


/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate a loop
   structure (besides the nodes representing the whole function and
   the basic blocks) for regional allocation.  A true return means
   that we really need to flatten the IR before reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but the root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocnos crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}