/* Building internal representation for IRA.
   Copyright (C) 2006-2013 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "regs.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "params.h"
#include "df.h"
#include "reload.h"
#include "sparseset.h"
#include "ira-int.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
                                     ira_loop_tree_node_t);
/* The root of the loop tree corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;
/* All nodes representing basic blocks are referred to through the
   following array.  We cannot use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred to through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* And size of the ira_loop_nodes array.  */
unsigned int ira_loop_nodes_count;
/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;
/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;
/* Array of references to all allocno preferences.  The order number
   of the preference corresponds to the index in the array.  */
ira_pref_t *ira_prefs;

/* Size of the previous array.  */
int ira_prefs_num;
/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;

/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
/* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
   the member loop_num.  */
static void
init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
{
  int max_regno = max_reg_num ();

  node->regno_allocno_map
    = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
  memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
  memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
  node->all_allocnos = ira_allocate_bitmap ();
  node->modified_regnos = ira_allocate_bitmap ();
  node->border_allocnos = ira_allocate_bitmap ();
  node->local_copies = ira_allocate_bitmap ();
  node->loop_num = loop_num;
  node->children = NULL;
  node->subloops = NULL;
}
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root, which corresponds to the whole function) will not be
   allocated, but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  loop_p loop;

  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
  last_basic_block_before_change = last_basic_block;
  for (i = 0; i < (unsigned int) last_basic_block; i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
              sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      ira_loop_nodes_count = 1;
      ira_loop_nodes = ((struct ira_loop_tree_node *)
                        ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes_count = number_of_loops (cfun);
  ira_loop_nodes = ((struct ira_loop_tree_node *)
                    ira_allocate (sizeof (struct ira_loop_tree_node)
                                  * ira_loop_nodes_count));
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    {
      if (loop_outer (loop) != NULL)
        {
          ira_loop_nodes[i].regno_allocno_map = NULL;
          skip_p = false;
          FOR_EACH_EDGE (e, ei, loop->header->preds)
            if (e->src != loop->latch
                && (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          if (skip_p)
            continue;
          edges = get_loop_exit_edges (loop);
          FOR_EACH_VEC_ELT (edges, j, e)
            if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
              {
                skip_p = true;
                break;
              }
          edges.release ();
          if (skip_p)
            continue;
        }
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
/* The function returns TRUE if there is more than one allocation
   region.  */
static bool
more_one_region_p (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops != NULL)
    FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
      if (ira_loop_nodes[i].regno_allocno_map != NULL
          && ira_loop_tree_root != &ira_loop_nodes[i])
        return true;
  return false;
}
/* Free the loop tree node of a loop.  */
static void
finish_loop_tree_node (ira_loop_tree_node_t loop)
{
  if (loop->regno_allocno_map != NULL)
    {
      ira_assert (loop->bb == NULL);
      ira_free_bitmap (loop->local_copies);
      ira_free_bitmap (loop->border_allocnos);
      ira_free_bitmap (loop->modified_regnos);
      ira_free_bitmap (loop->all_allocnos);
      ira_free (loop->regno_allocno_map);
      loop->regno_allocno_map = NULL;
    }
}
/* Free the loop tree nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;

  for (i = 0; i < ira_loop_nodes_count; i++)
    finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
        ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
        ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
        ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL, we add the
   loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We cannot use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added the loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
        parent = NULL;
      else
        {
          for (parent = loop_outer (loop);
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      if (parent == NULL)
        {
          loop_node->next = NULL;
          loop_node->subloop_next = NULL;
          loop_node->parent = NULL;
        }
      else
        {
          parent_node = &ira_loop_nodes[parent->num];
          loop_node->next = parent_node->children;
          parent_node->children = loop_node;
          loop_node->subloop_next = parent_node->subloops;
          parent_node->subloops = loop_node;
          loop_node->parent = parent_node;
        }
    }
}
/* The following recursive function sets up levels of nodes of the
   tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
   The function returns the maximal level in the tree plus 1.  */
static int
setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
{
  int height, max_height;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  loop_node->level = level;
  max_height = level + 1;
  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      height = setup_loop_tree_level (subloop_node, level + 1);
      if (height > max_height)
        max_height = height;
    }
  return max_height;
}
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We cannot use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB (bb)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
        parent = NULL;
      else
        {
          for (parent = bb->loop_father;
               parent != NULL;
               parent = loop_outer (parent))
            if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
              break;
        }
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
        ira_free (ira_loop_nodes[l].regno_allocno_map);
        ira_loop_nodes[l].regno_allocno_map
          = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
                                            * max_regno);
        memset (ira_loop_nodes[l].regno_allocno_map, 0,
                sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
        /* Caps are not in the regno allocno maps.  */
        continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle.  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
}
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is a
   container of array allocnos.  */
static vec<ira_allocno_t> allocno_vec;

/* Vec containing references to all created ira_objects.  It is a
   container of ira_object_id_map.  */
static vec<ira_object_t> ira_object_id_map_vec;

/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  live_range_pool
    = create_alloc_pool ("live ranges",
                         sizeof (struct live_range), 100);
  allocno_pool
    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
  object_pool
    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
  allocno_vec.create (max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec.create (max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
                                      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
/* Create and return an object corresponding to a new allocno A.  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                          reg_class_contents[aclass]);
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  ira_object_id_map_vec.safe_push (obj);
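  /* safe_push may have reallocated the vector's storage, so refresh
     the exported array pointer and element count.  */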
  ira_object_id_map
    = ira_object_id_map_vec.address ();
  ira_objects_num = ira_object_id_map_vec.length ();

  return obj;
}
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
                    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
        /* Remember that we can create temporary allocnos to break
           cycles in register shuffle on region borders (see
           ira-emit.c).  */
        loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a) = 0;
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_PREFS (a) = NULL;
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;
  ALLOCNO_ADD_DATA (a) = NULL;
  allocno_vec.safe_push (a);
  ira_allocnos = allocno_vec.address ();
  ira_allocnos_num = allocno_vec.length ();

  return a;
}
/* Set up register class for A and update its conflict hard
   registers.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
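  /* Hard registers outside the new class can never be assigned to A,
     so record the complement of the class as conflicts up front.  */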
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
                              reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
                              reg_class_contents[aclass]);
    }
}
/* Determine the number of objects we should associate with allocno A
   and allocate them.  */
void
ira_create_allocno_objects (ira_allocno_t a)
{
  enum machine_mode mode = ALLOCNO_MODE (a);
  enum reg_class aclass = ALLOCNO_CLASS (a);
  int n = ira_reg_class_max_nregs[aclass][mode];
  int i;
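  /* Only a multi-word pseudo whose mode occupies exactly two words in
     exactly two hard registers gets one object per word; everything
     else gets a single object covering the whole allocno.  */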
  if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
    n = 1;

  ALLOCNO_NUM_OBJECTS (a) = n;
  for (i = 0; i < n; i++)
    ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
}
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known.  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
/* Merge hard register conflict information for all objects associated
   with allocno FROM into the corresponding objects associated with TO.
   If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
                          bool total_only)
{
  int i;

  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
        IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
                          OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
                        OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
/* Update hard register conflict information for all objects associated
   with A to include the regs in SET.  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}
/* Return TRUE if a conflict vector with NUM elements is more
   profitable than a conflict bit vector for OBJ.  */
bool
ira_conflict_vector_profitable_p (ira_object_t obj, int num)
{
  int nw;
  int max = OBJECT_MAX (obj);
  int min = OBJECT_MIN (obj);

  if (max < min)
    /* We prefer a bit vector in such case because it does not result
       in allocation.  */
    return false;
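  /* For example, with 64-bit pointers and a 32-bit IRA_INT_TYPE, an
     object with min = 0 and max = 127 needs nw = 4 words, so a vector
     is preferred only while 16 * (num + 1) < 48, i.e. for num <= 1.  */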
  nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
  return (2 * sizeof (ira_object_t) * (num + 1)
          < 3 * nw * sizeof (IRA_INT_TYPE));
}
/* Allocate and initialize the conflict vector of OBJ for NUM
   conflicting objects.  */
void
ira_allocate_conflict_vec (ira_object_t obj, int num)
{
  int size;
  ira_object_t *vec;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  num++; /* for NULL end marker  */
  size = sizeof (ira_object_t) * num;
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
  vec[0] = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = true;
}
/* Allocate and initialize the conflict bit vector of OBJ.  */
static void
allocate_conflict_bit_vec (ira_object_t obj)
{
  unsigned int size;

  ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
  size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
          / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
  OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
  memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
  OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
  OBJECT_CONFLICT_VEC_P (obj) = false;
}
/* Allocate and initialize the conflict vector or conflict bit vector
   of OBJ for NUM conflicting allocnos, whichever is more profitable.  */
void
ira_allocate_object_conflicts (ira_object_t obj, int num)
{
  if (ira_conflict_vector_profitable_p (obj, num))
    ira_allocate_conflict_vec (obj, num);
  else
    allocate_conflict_bit_vec (obj);
}
/* Add OBJ2 to the conflicts of OBJ1.  */
static void
add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
{
  int num;
  unsigned long size;

  if (OBJECT_CONFLICT_VEC_P (obj1))
    {
      ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
      int curr_num = OBJECT_NUM_CONFLICTS (obj1);

      num = curr_num + 2;
      if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
        {
          ira_object_t *newvec;

          size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
          newvec = (ira_object_t *) ira_allocate (size);
          memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
          ira_free (vec);
          vec = newvec;
          OBJECT_CONFLICT_ARRAY (obj1) = vec;
          OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
        }
      vec[num - 2] = obj2;
      vec[num - 1] = NULL;
      OBJECT_NUM_CONFLICTS (obj1)++;
    }
  else
    {
      int nw, added_head_nw, id;
      IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);

      id = OBJECT_CONFLICT_ID (obj2);
      if (OBJECT_MIN (obj1) > id)
        {
          /* Expand head of the bit vector.  */
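          /* E.g. with IRA_INT_BITS == 32, OBJECT_MIN == 100 and
             id == 30: added_head_nw = (100 - 30 - 1) / 32 + 1 = 3 new
             words are prepended, and OBJECT_MIN drops by 3 * 32 = 96
             below.  */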
          added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
          nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
            {
              memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                       vec, nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
            }
          else
            {
              size
                = (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
                      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
              memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
              memset ((char *) vec
                      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
                      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
        }
      else if (OBJECT_MAX (obj1) < id)
        {
          nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
          size = nw * sizeof (IRA_INT_TYPE);
          if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
            {
              /* Expand tail of the bit vector.  */
              size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
              vec = (IRA_INT_TYPE *) ira_allocate (size);
              memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1),
                      OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
                      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
              ira_free (OBJECT_CONFLICT_ARRAY (obj1));
              OBJECT_CONFLICT_ARRAY (obj1) = vec;
              OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
            }
          OBJECT_MAX (obj1) = id;
        }
      SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
    }
}
/* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
void
ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
{
  add_to_conflicts (obj1, obj2);
  add_to_conflicts (obj2, obj1);
}
/* Clear all conflicts of OBJ.  */
static void
clear_conflicts (ira_object_t obj)
{
  if (OBJECT_CONFLICT_VEC_P (obj))
    {
      OBJECT_NUM_CONFLICTS (obj) = 0;
      OBJECT_CONFLICT_VEC (obj)[0] = NULL;
    }
  else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
    {
      int nw;

      nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
      memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
    }
}
/* The array used to find duplications in conflict vectors of
   allocnos.  */
static int *conflict_check;

/* The value used to mark allocno presence in the conflict vector of
   the current allocno.  */
static int curr_conflict_check_tick;
/* Remove duplications in conflict vector of OBJ.  */
static void
compress_conflict_vec (ira_object_t obj)
{
  ira_object_t *vec, conflict_obj;
  int i, j;

  ira_assert (OBJECT_CONFLICT_VEC_P (obj));
  vec = OBJECT_CONFLICT_VEC (obj);
  curr_conflict_check_tick++;
  for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
    {
      int id = OBJECT_CONFLICT_ID (conflict_obj);
      if (conflict_check[id] != curr_conflict_check_tick)
        {
          conflict_check[id] = curr_conflict_check_tick;
          vec[j++] = conflict_obj;
        }
    }
  OBJECT_NUM_CONFLICTS (obj) = j;
  vec[j] = NULL;
}
/* Remove duplications in conflict vectors of all allocnos.  */
static void
compress_conflict_vecs (void)
{
  ira_object_t obj;
  ira_object_iterator oi;

  conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  memset (conflict_check, 0, sizeof (int) * ira_objects_num);
  curr_conflict_check_tick = 0;
  FOR_EACH_OBJECT (obj, oi)
    if (OBJECT_CONFLICT_VEC_P (obj))
      compress_conflict_vec (obj);
  ira_free (conflict_check);
}
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
/* Create and return the cap representing allocno A in the
   parent loop.  */
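/* A cap stands for allocno A of a subloop within the parent region:
   it starts out with copies of A's mode, class, costs and conflict
   sets (set up field by field below) so that the parent region can
   account for A during its own allocation.  */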
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, " Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
/* Create and return a live range for OBJECT with given attributes.  */
live_range_t
ira_create_live_range (ira_object_t obj, int start, int finish,
                       live_range_t next)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  p->object = obj;
  p->start = start;
  p->finish = finish;
  p->next = next;
  return p;
}
/* Create a new live range for OBJECT and queue it at the head of its
   live range list.  */
void
ira_add_live_range_to_object (ira_object_t object, int start, int finish)
{
  live_range_t p;

  p = ira_create_live_range (object, start, finish,
                             OBJECT_LIVE_RANGES (object));
  OBJECT_LIVE_RANGES (object) = p;
}
/* Copy allocno live range R and return the result.  */
static live_range_t
copy_live_range (live_range_t r)
{
  live_range_t p;

  p = (live_range_t) pool_alloc (live_range_pool);
  *p = *r;
  return p;
}
/* Copy allocno live range list given by its head R and return the
   result.  */
live_range_t
ira_copy_live_range_list (live_range_t r)
{
  live_range_t p, first, last;

  if (r == NULL)
    return NULL;
  for (first = last = NULL; r != NULL; r = r->next)
    {
      p = copy_live_range (r);
      if (first == NULL)
        first = p;
      else
        last->next = p;
      last = p;
    }
  return first;
}
/* Merge ranges R1 and R2 and return the result.  The function
   maintains the order of ranges and tries to minimize the number of
   result ranges.  */
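/* For example, merging the lists {[10,12], [1,4]} and {[3,6]} yields
   {[10,12], [1,6]}: [1,4] and [3,6] overlap, and adjacent ranges
   (finish + 1 == start) are coalesced as well.  */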
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last, temp;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      if (r1->start < r2->start)
        {
          temp = r1;
          r1 = r2;
          r2 = temp;
        }
      if (r1->start <= r2->finish + 1)
        {
          /* Intersected ranges: merge r1 and r2 into r1.  */
          r1->start = r2->start;
          if (r1->finish < r2->finish)
            r1->finish = r2->finish;
          temp = r2;
          r2 = r2->next;
          ira_finish_live_range (temp);
          if (r2 == NULL)
            {
              /* To try to merge with subsequent ranges in r1.  */
              r2 = r1->next;
              r1->next = NULL;
            }
        }
      else
        {
          /* Add r1 to the result.  */
          if (first == NULL)
            first = last = r1;
          else
            {
              last->next = r1;
              last = r1;
            }
          r1 = r1->next;
          if (r1 == NULL)
            {
              /* To try to merge with subsequent ranges in r2.  */
              r1 = r2->next;
              r2->next = NULL;
            }
        }
    }
  if (r1 != NULL)
    {
      if (first == NULL)
        first = r1;
      else
        last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
        first = r2;
      else
        last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
/* Return TRUE if live ranges R1 and R2 intersect.  */
bool
ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
{
  /* Remember the live ranges are always kept ordered.  */
  while (r1 != NULL && r2 != NULL)
    {
      if (r1->start > r2->finish)
        r1 = r1->next;
      else if (r2->start > r1->finish)
        r2 = r2->next;
      else
        return true;
    }
  return false;
}
/* Free allocno live range R.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
/* Free list of allocno live ranges starting with R.  */
void
ira_finish_live_range_list (live_range_t r)
{
  live_range_t next_r;

  for (; r != NULL; r = next_r)
    {
      next_r = r->next;
      ira_finish_live_range (r);
    }
}
/* Free updated register costs of allocno A.  */
void
ira_free_allocno_updated_costs (ira_allocno_t a)
{
  enum reg_class aclass;

  aclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free and nullify all cost vectors allocated earlier for allocno
   A.  */
static void
ira_free_allocno_costs (ira_allocno_t a)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj;
  ira_allocno_object_iterator oi;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
      ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
      if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
        ira_free (OBJECT_CONFLICT_ARRAY (obj));
      pool_free (object_pool, obj);
    }

  ira_allocnos[ALLOCNO_NUM (a)] = NULL;
  if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
  if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
    ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
                          aclass);
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
}
/* Free the memory allocated for allocno A.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}
/* Free the memory allocated for all allocnos.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  ira_object_id_map_vec.release ();
  allocno_vec.release ();
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
/* Pool for allocno preferences.  */
static alloc_pool pref_pool;

/* Vec containing references to all created preferences.  It is a
   container of array ira_prefs.  */
static vec<ira_pref_t> pref_vec;

/* The function initializes data concerning allocno prefs.  */
static void
initiate_prefs (void)
{
  pref_pool
    = create_alloc_pool ("prefs", sizeof (struct ira_allocno_pref), 100);
  pref_vec.create (get_max_uid ());
  ira_prefs = NULL;
  ira_prefs_num = 0;
}
/* Return pref for A and HARD_REGNO if any.  */
static ira_pref_t
find_allocno_pref (ira_allocno_t a, int hard_regno)
{
  ira_pref_t pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    if (pref->allocno == a && pref->hard_regno == hard_regno)
      return pref;
  return NULL;
}
1206 ira_create_pref (ira_allocno_t a
, int hard_regno
, int freq
)
1210 pref
= (ira_pref_t
) pool_alloc (pref_pool
);
1211 pref
->num
= ira_prefs_num
;
1213 pref
->hard_regno
= hard_regno
;
1215 pref_vec
.safe_push (pref
);
1216 ira_prefs
= pref_vec
.address ();
1217 ira_prefs_num
= pref_vec
.length ();
/* Attach a pref PREF to the corresponding allocno.  */
static void
add_allocno_pref_to_list (ira_pref_t pref)
{
  ira_allocno_t a = pref->allocno;

  pref->next_pref = ALLOCNO_PREFS (a);
  ALLOCNO_PREFS (a) = pref;
}
/* Create (or update frequency if the pref already exists) the pref of
   allocno A preferring HARD_REGNO with frequency FREQ.  */
void
ira_add_allocno_pref (ira_allocno_t a, int hard_regno, int freq)
{
  ira_pref_t pref;

  if (freq <= 0)
    freq = 1;
  if ((pref = find_allocno_pref (a, hard_regno)) != NULL)
    {
      pref->freq += freq;
      return;
    }
  pref = ira_create_pref (a, hard_regno, freq);
  ira_assert (a != NULL);
  add_allocno_pref_to_list (pref);
}
/* Print info about PREF into file F.  */
static void
print_pref (FILE *f, ira_pref_t pref)
{
  fprintf (f, " pref%d:a%d(r%d)<-hr%d@%d\n", pref->num,
           ALLOCNO_NUM (pref->allocno), ALLOCNO_REGNO (pref->allocno),
           pref->hard_regno, pref->freq);
}
/* Print info about PREF into stderr.  */
void
ira_debug_pref (ira_pref_t pref)
{
  print_pref (stderr, pref);
}
/* Print info about all prefs into file F.  */
static void
print_prefs (FILE *f)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    print_pref (f, pref);
}
/* Print info about all prefs into stderr.  */
void
ira_debug_prefs (void)
{
  print_prefs (stderr);
}
/* Print info about prefs involving allocno A into file F.  */
static void
print_allocno_prefs (FILE *f, ira_allocno_t a)
{
  ira_pref_t pref;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = pref->next_pref)
    fprintf (f, " pref%d:hr%d@%d", pref->num, pref->hard_regno, pref->freq);
  fprintf (f, "\n");
}
/* Print info about prefs involving allocno A into stderr.  */
void
ira_debug_allocno_prefs (ira_allocno_t a)
{
  print_allocno_prefs (stderr, a);
}
/* The function frees memory allocated for PREF.  */
static void
finish_pref (ira_pref_t pref)
{
  ira_prefs[pref->num] = NULL;
  pool_free (pref_pool, pref);
}
/* Remove PREF from the list of allocno prefs and free memory for
   it.  */
void
ira_remove_pref (ira_pref_t pref)
{
  ira_pref_t cpref, prev;

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Removing pref%d:hr%d@%d\n",
             pref->num, pref->hard_regno, pref->freq);
  for (prev = NULL, cpref = ALLOCNO_PREFS (pref->allocno);
       cpref != NULL;
       prev = cpref, cpref = cpref->next_pref)
    if (cpref == pref)
      break;
  ira_assert (cpref != NULL);
  if (prev == NULL)
    ALLOCNO_PREFS (pref->allocno) = pref->next_pref;
  else
    prev->next_pref = pref->next_pref;
  finish_pref (pref);
}
/* Remove all prefs of allocno A.  */
void
ira_remove_allocno_prefs (ira_allocno_t a)
{
  ira_pref_t pref, next_pref;

  for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
    {
      next_pref = pref->next_pref;
      finish_pref (pref);
    }
  ALLOCNO_PREFS (a) = NULL;
}
/* Free memory allocated for all prefs.  */
static void
finish_prefs (void)
{
  ira_pref_t pref;
  ira_pref_iterator pi;

  FOR_EACH_PREF (pref, pi)
    finish_pref (pref);
  pref_vec.release ();
  free_alloc_pool (pref_pool);
}
/* Pool for copies.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static vec<ira_copy_t> copy_vec;

/* The function initializes data concerning allocno copies.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  copy_vec.create (get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
/* Return copy connecting A1 and A2 and originated from INSN of
   LOOP_TREE_NODE if any.  */
static ira_copy_t
find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
                   ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp, next_cp;
  ira_allocno_t another_a;

  for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
    {
      if (cp->first == a1)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a1)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      if (another_a == a2 && cp->insn == insn
          && cp->loop_tree_node == loop_tree_node)
        return cp;
    }
  return NULL;
}
/* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
   SECOND, FREQ, CONSTRAINT_P, and INSN.  */
ira_copy_t
ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                 bool constraint_p, rtx insn,
                 ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  cp = (ira_copy_t) pool_alloc (copy_pool);
  cp->num = ira_copies_num;
  cp->first = first;
  cp->second = second;
  cp->freq = freq;
  cp->constraint_p = constraint_p;
  cp->insn = insn;
  cp->loop_tree_node = loop_tree_node;
  copy_vec.safe_push (cp);
  ira_copies = copy_vec.address ();
  ira_copies_num = copy_vec.length ();
  return cp;
}
/* Attach a copy CP to allocnos involved in the copy.  */
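/* A copy lives on two intrusive lists at once: the copy list of
   CP->first (linked through the *_first_allocno_copy fields) and the
   copy list of CP->second (linked through the *_second_allocno_copy
   fields).  A neighbour on either list may have our allocno as its
   own first or second end, hence both cases are checked when fixing
   up the back pointers below.  */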
static void
add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
        cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
        cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
        cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
        cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
/* Make a copy CP a canonical copy where number of the
   first allocno is less than the second one.  */
static void
swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
{
  ira_allocno_t temp;
  ira_copy_t temp_cp;

  if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
    return;

  temp = cp->first;
  cp->first = cp->second;
  cp->second = temp;

  temp_cp = cp->prev_first_allocno_copy;
  cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
  cp->prev_second_allocno_copy = temp_cp;

  temp_cp = cp->next_first_allocno_copy;
  cp->next_first_allocno_copy = cp->next_second_allocno_copy;
  cp->next_second_allocno_copy = temp_cp;
}
/* Create (or update frequency if the copy already exists) and return
   the copy of allocnos FIRST and SECOND with frequency FREQ
   corresponding to move insn INSN (if any) and originated from
   LOOP_TREE_NODE.  */
ira_copy_t
ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
                      bool constraint_p, rtx insn,
                      ira_loop_tree_node_t loop_tree_node)
{
  ira_copy_t cp;

  if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
    {
      cp->freq += freq;
      return cp;
    }
  cp = ira_create_copy (first, second, freq, constraint_p, insn,
                        loop_tree_node);
  ira_assert (first != NULL && second != NULL);
  add_allocno_copy_to_list (cp);
  swap_allocno_copy_ends_if_necessary (cp);
  return cp;
}
/* Print info about copy CP into file F.  */
static void
print_copy (FILE *f, ira_copy_t cp)
{
  fprintf (f, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
           ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
           ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
           cp->insn != NULL
           ? "move" : cp->constraint_p ? "constraint" : "shuffle");
}
DEBUG_FUNCTION void
debug (ira_allocno_copy &ref)
{
  print_copy (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno_copy *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
/* Print info about copy CP into stderr.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
/* Print info about all copies into file F.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
/* Print info about all copies into stderr.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
/* Print info about copies involving allocno A into file F.  */
static void
print_allocno_copies (FILE *f, ira_allocno_t a)
{
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      if (cp->first == a)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
               ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
    }
  fprintf (f, "\n");
}
DEBUG_FUNCTION void
debug (ira_allocno &ref)
{
  print_allocno_copies (stderr, &ref);
}

DEBUG_FUNCTION void
debug (ira_allocno *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}
/* Print info about copies involving allocno A into stderr.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
/* The function frees memory allocated for copy CP.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
/* Free memory allocated for all copies.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  copy_vec.release ();
  free_alloc_pool (copy_pool);
}
/* Pools for cost vectors.  It is defined only for allocno classes.  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];

/* The function initiates work with hard register cost vectors.  It
   creates allocation pool for each allocno class.  */
static void
initiate_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      cost_vector_pool[aclass]
        = create_alloc_pool ("cost vectors",
                             sizeof (int) * ira_class_hard_regs_num[aclass],
                             100);
    }
}
/* Allocate and return a cost vector VEC for ACLASS.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}
/* Free a cost vector VEC for ACLASS.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}
/* Finish work with hard register cost vectors.  Release allocation
   pool for each allocno class.  */
static void
finish_cost_vectors (void)
{
  int i;
  enum reg_class aclass;

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      free_alloc_pool (cost_vector_pool[aclass]);
    }
}
/* Compute a post-ordering of the reverse control flow of the loop body
   designated by the children nodes of LOOP_NODE, whose body nodes in
   pre-order are input as LOOP_PREORDER.  Return a VEC with a post-order
   of the reverse loop body.

   For the post-order of the reverse CFG, we visit the basic blocks in
   LOOP_PREORDER array in the reverse order of where they appear.
   This is important: We do not just want to compute a post-order of
   the reverse CFG, we want to make a best-guess for a visiting order that
   minimizes the number of chain elements per allocno live range.  If the
   blocks would be visited in a different order, we would still compute a
   correct post-ordering but it would be less likely that two nodes
   connected by an edge in the CFG are neighbours in the topsort.  */
static vec<ira_loop_tree_node_t>
ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED,
                                  vec<ira_loop_tree_node_t> loop_preorder)
{
  vec<ira_loop_tree_node_t> topsort_nodes = vNULL;
  unsigned int n_loop_preorder;

  n_loop_preorder = loop_preorder.length ();
  if (n_loop_preorder != 0)
    {
      ira_loop_tree_node_t subloop_node;
      unsigned int i;
      vec<ira_loop_tree_node_t> dfs_stack;

      /* This is a bit of strange abuse of the BB_VISITED flag:  We use
         the flag to mark blocks we still have to visit to add them to
         our post-order.  Define an alias to avoid confusion.  */
#define BB_TO_VISIT BB_VISITED

      FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
        {
          gcc_checking_assert (! (subloop_node->bb->flags & BB_TO_VISIT));
          subloop_node->bb->flags |= BB_TO_VISIT;
        }

      topsort_nodes.create (n_loop_preorder);
      dfs_stack.create (n_loop_preorder);

      FOR_EACH_VEC_ELT_REVERSE (loop_preorder, i, subloop_node)
        {
          if (! (subloop_node->bb->flags & BB_TO_VISIT))
            continue;

          subloop_node->bb->flags &= ~BB_TO_VISIT;
          dfs_stack.quick_push (subloop_node);
          while (! dfs_stack.is_empty ())
            {
              edge e;
              edge_iterator ei;

              ira_loop_tree_node_t n = dfs_stack.last ();
              FOR_EACH_EDGE (e, ei, n->bb->preds)
                {
                  ira_loop_tree_node_t pred_node;
                  basic_block pred_bb = e->src;

                  if (e->src == ENTRY_BLOCK_PTR)
                    continue;

                  pred_node = IRA_BB_NODE_BY_INDEX (pred_bb->index);
                  if (pred_node != n
                      && (pred_node->bb->flags & BB_TO_VISIT))
                    {
                      pred_node->bb->flags &= ~BB_TO_VISIT;
                      dfs_stack.quick_push (pred_node);
                    }
                }
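              /* If no unvisited predecessor was pushed above, N is
                 finished: pop it and emit it in the post-order.  */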
              if (n == dfs_stack.last ())
                {
                  dfs_stack.pop ();
                  topsort_nodes.quick_push (n);
                }
            }
        }

#undef BB_TO_VISIT
      dfs_stack.release ();
    }

  gcc_assert (topsort_nodes.length () == n_loop_preorder);
  return topsort_nodes;
}
/* The current loop tree node and its regno allocno map.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
/* This recursive function traverses the loop tree with root LOOP_NODE,
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE are also processed (before its
   subloop nodes).

   If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
   the loop are passed in the *reverse* post-order of the *reverse*
   CFG.  This is only used by ira_create_allocno_live_ranges, which
   wants to visit basic blocks in this order to minimize the number
   of elements per live range chain.
   Note that the loop tree nodes are still visited in the normal,
   forward post-order of the loop tree.  */
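/* For instance, create_allocnos below uses this traversal twice: once
   over basic blocks with a preorder function to create the allocnos,
   and once over loop nodes with a postorder function to propagate the
   modified regnos upwards.  */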
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
                        void (*preorder_func) (ira_loop_tree_node_t),
                        void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    {
      vec<ira_loop_tree_node_t>
        loop_preorder = vNULL;
      unsigned int i;

      /* Add all nodes to the set of nodes to visit.  The IRA loop tree
         is set up such that nodes in the loop body appear in a pre-order
         of their place in the CFG.  */
      for (subloop_node = loop_node->children;
           subloop_node != NULL;
           subloop_node = subloop_node->next)
        if (subloop_node->bb != NULL)
          loop_preorder.safe_push (subloop_node);

      if (preorder_func != NULL)
        FOR_EACH_VEC_ELT (loop_preorder, i, subloop_node)
          (*preorder_func) (subloop_node);

      if (postorder_func != NULL)
        {
          vec<ira_loop_tree_node_t> loop_rev_postorder =
            ira_loop_tree_body_rev_postorder (loop_node, loop_preorder);
          FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder, i, subloop_node)
            (*postorder_func) (subloop_node);
          loop_rev_postorder.release ();
        }

      loop_preorder.release ();
    }

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
                              preorder_func, postorder_func);
    }

  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
/* The basic block currently being processed.  */
static basic_block curr_bb;
/* This recursive function creates allocnos corresponding to
   pseudo-registers contained in X.  True OUTPUT_P means that X is
   an lvalue.  */
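/* For a simple insn pattern such as (set (reg 130) (plus (reg 131)
   (const_int 8))), the walk below recurses with OUTPUT_P true on the
   SET_DEST and false on the SET_SRC, creating allocnos for pseudos
   130 and 131 in the current region if they do not exist yet.  */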
static void
create_insn_allocnos (rtx x, bool output_p)
{
  int i, j;
  const char *fmt;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      int regno;

      if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
        {
          ira_allocno_t a;

          if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
            a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);

          ALLOCNO_NREFS (a)++;
          ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
          if (output_p)
            bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
        }
      return;
    }
  else if (code == SET)
    {
      create_insn_allocnos (SET_DEST (x), true);
      create_insn_allocnos (SET_SRC (x), false);
      return;
    }
  else if (code == CLOBBER)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      return;
    }
  else if (code == MEM)
    {
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }
  else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
           code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
    {
      create_insn_allocnos (XEXP (x, 0), true);
      create_insn_allocnos (XEXP (x, 0), false);
      return;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        create_insn_allocnos (XEXP (x, i), output_p);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          create_insn_allocnos (XVECEXP (x, i, j), output_p);
    }
}
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be an allocno living through from one subloop to
     another.  */
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = df_get_live_in (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e->src),
                             FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
        if (ira_curr_regno_allocno_map[i] == NULL)
          {
            /* The order of creations is important for right
               ira_regno_allocno_map.  */
            if ((parent = ira_curr_loop_tree_node->parent) != NULL
                && parent->regno_allocno_map[i] == NULL)
              ira_create_allocno (i, false, parent);
            ira_create_allocno (i, false, ira_curr_loop_tree_node);
          }
        bitmap_set_bit (border_allocnos,
                        ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      vec<edge> edges;

      ira_assert (current_loops != NULL);
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
        if (e->src != loop_node->loop->latch)
          create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
        create_loop_allocnos (e);
      edges.release ();
    }
}
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
                   loop_tree_node->modified_regnos);
}
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
         a != NULL;
         a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
          && (parent_a = parent->regno_allocno_map[i]) != NULL
          /* There are no caps yet at this point.  So use
             border_allocnos to find allocnos for the propagation.  */
          && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
                           ALLOCNO_NUM (a)))
        {
          if (! ALLOCNO_BAD_SPILL_P (a))
            ALLOCNO_BAD_SPILL_P (parent_a) = false;
          ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
          ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
          ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
          merge_hard_reg_conflicts (a, parent_a, true);
          ALLOCNO_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CALLS_CROSSED_NUM (a);
          ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
            += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
          ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
            += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
          aclass = ALLOCNO_CLASS (a);
          ira_assert (aclass == ALLOCNO_CLASS (parent_a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
             ALLOCNO_HARD_REG_COSTS (a));
          ira_allocate_and_accumulate_costs
            (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
             aclass,
             ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
          ALLOCNO_CLASS_COST (parent_a)
            += ALLOCNO_CLASS_COST (a);
          ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
        }
}
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
                          create_loop_tree_node_allocnos, NULL);
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
                            propagate_modified_regnos);
}
/* The page contains functions to remove some regions from a separate
   register allocation.  We remove regions whose separate allocation
   will hardly improve the result.  As a result we speed up regional
   register allocation.  */
/* The function changes the object in range list given by R to OBJ.  */
static void
change_object_in_range_list (live_range_t r, ira_object_t obj)
{
  for (; r != NULL; r = r->next)
    r->object = obj;
}
/* Move all live ranges associated with allocno FROM to allocno TO.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file,
                   " Moving ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
/* Copy all live ranges associated with allocno FROM to allocno TO.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file, " Copying ranges of a%dr%d to a%dr%d: ",
                   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
                   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
          ira_print_live_range_list (ira_dump_file, lr);
        }
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
        = ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}

/* Return TRUE if NODE represents a loop with low register
   pressure.  */
static bool
low_pressure_loop_node_p (ira_loop_tree_node_t node)
{
  int i;
  enum reg_class pclass;

  if (node->bb != NULL)
    return false;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      pclass = ira_pressure_classes[i];
      if (node->reg_pressure[pclass] > ira_class_hard_regs_num[pclass]
	  && ira_class_hard_regs_num[pclass] > 1)
	return false;
    }
  return true;
}

#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such a loop if the target uses stack registers,
   because reg-stack.c cannot deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;
  bool res;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  edges.release ();
  return res;
}
#endif

/* Sort loops for marking them for removal.  We put already marked
   loops first, then less frequent loops next, and then outer loops
   next.  */
static int
loop_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
  ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;

  ira_assert (l1->parent != NULL && l2->parent != NULL);
  if (l1->to_remove_p && ! l2->to_remove_p)
    return -1;
  if (! l1->to_remove_p && l2->to_remove_p)
    return 1;
  if ((diff = l1->loop->header->frequency
	      - l2->loop->header->frequency) != 0)
    return diff;
  if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
    return diff;
  /* Make sorting stable.  */
  return l1->loop_num - l2->loop_num;
}
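
/* Illustrative sketch (not part of IRA; compiled out): like
   loop_compare_func above, a qsort comparator should break final ties
   on a unique key, because qsort itself is not stable.  A minimal
   standalone example, assuming each element carries a unique `id':  */
#if 0
struct item { int key; int id; };

static int
item_compare (const void *v1p, const void *v2p)
{
  const struct item *i1 = (const struct item *) v1p;
  const struct item *i2 = (const struct item *) v2p;

  if (i1->key != i2->key)
    return i1->key - i2->key;  /* Primary ordering.  */
  /* A unique tie-breaker makes the result deterministic across qsort
     implementations.  */
  return i1->id - i2->id;
}
#endif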

/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another region with
   low register pressure.  In this case a separate allocation of the
   loop hardly helps (for an irregular register file architecture it
   could help by choosing a better hard register in the loop but we
   prefer faster allocation even in this case).  We also remove cheap
   loops if there are more than IRA_MAX_LOOPS_NUM of them.  Loops with
   EH entry or exit edges are removed too because the allocation might
   require putting pseudo moves on the EH edges (we could still do
   this for pseudos with caller-saved hard registers in some cases but
   it is impossible to say here or during the top-down allocation pass
   what hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * number_of_loops (cfun));
  for (n = i = 0; vec_safe_iterate (get_loops (cfun), i, &loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
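
/* Illustrative arithmetic (not part of IRA): with n sorted loops, the
   trimming loop above marks elements while n - i + 1 > IRA_MAX_LOOPS_NUM,
   so at most IRA_MAX_LOOPS_NUM - 1 non-root loops survive, which
   together with the root region stay within the limit.  For example,
   with n = 10 and IRA_MAX_LOOPS_NUM = 4, the loop marks i = 0 .. 6
   (7 loops) and keeps the 3 most expensive ones.  */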

/* Mark all loops but the root for removal.  */
static void
mark_all_loops_for_removal (void)
{
  int i;
  loop_p loop;

  ira_assert (current_loops != NULL);
  FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	ira_loop_nodes[i].to_remove_p = true;
	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	  fprintf
	    (ira_dump_file,
	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
	     ira_loop_nodes[i].loop_num,
	     ira_loop_nodes[i].loop->header->index,
	     ira_loop_nodes[i].loop->header->frequency,
	     loop_depth (ira_loop_nodes[i].loop));
      }
}

/* Definition of vector of loop tree nodes.  */

/* Vec containing references to all removed loop tree nodes.  */
static vec<ira_loop_tree_node_t> removed_loop_vec;

/* Vec containing references to all children of loop tree nodes.  */
static vec<ira_loop_tree_node_t> children_vec;

/* Remove subregions of NODE if their separate allocation will not
   improve the result.  */
static void
remove_unnecessary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    children_vec.safe_push (node);
  start = children_vec.length ();
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      remove_unnecessary_loop_nodes_from_loop_tree (subnode);
    else
      children_vec.safe_push (subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      removed_loop_vec.safe_push (node);
      return;
    }
  while (children_vec.length () > start)
    {
      subnode = children_vec.pop ();
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}

/* Return TRUE if NODE is inside PARENT.  */
static bool
loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
{
  for (node = node->parent; node != NULL; node = node->parent)
    if (node == parent)
      return true;
  return false;
}

/* Sort allocnos according to their order in the regno allocno
   list.  */
static int
regno_allocno_order_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
  ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);

  if (loop_is_inside_p (n1, n2))
    return -1;
  else if (loop_is_inside_p (n2, n1))
    return 1;
  /* If allocnos are equally good, sort by allocno numbers, so that
     the results of qsort leave nothing to chance.  We put allocnos
     with a higher number first in the list because it is the original
     order for allocnos from loops on the same levels.  */
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}

/* This array is used to sort allocnos to restore allocno order in
   the regno allocno list.  */
static ira_allocno_t *regno_allocnos;

/* Restore allocno order for REGNO in the regno allocno list.  */
static void
ira_rebuild_regno_allocno_list (int regno)
{
  int i, n;
  ira_allocno_t a;

  for (n = 0, a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    regno_allocnos[n++] = a;
  ira_assert (n > 0);
  qsort (regno_allocnos, n, sizeof (ira_allocno_t),
	 regno_allocno_order_compare_func);
  for (i = 1; i < n; i++)
    ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
  ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
  ira_regno_allocno_map[regno] = regno_allocnos[0];
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
}
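
/* Illustrative sketch (not part of IRA; compiled out): rebuilding a
   singly linked list from a qsort'ed array of its elements, the same
   idiom ira_rebuild_regno_allocno_list uses above.  `elem_t' and its
   `next' field are hypothetical.  */
#if 0
typedef struct elem elem_t;
struct elem { elem_t *next; };

static elem_t *
relink_sorted (elem_t **sorted, int n)
{
  int i;

  for (i = 1; i < n; i++)
    sorted[i - 1]->next = sorted[i];  /* Chain neighbours together.  */
  sorted[n - 1]->next = NULL;         /* Terminate the list.  */
  return sorted[0];                   /* The new head.  */
}
#endif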

/* Propagate info from allocno FROM_A to allocno A.  */
static void
propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
{
  enum reg_class aclass;

  merge_hard_reg_conflicts (from_a, a, false);
  ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
  ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
  ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
  ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)
    += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a);
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
  if (! ALLOCNO_BAD_SPILL_P (from_a))
    ALLOCNO_BAD_SPILL_P (a) = false;
  aclass = ALLOCNO_CLASS (from_a);
  ira_assert (aclass == ALLOCNO_CLASS (a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
				     ALLOCNO_HARD_REG_COSTS (from_a));
  ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				     aclass,
				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
  ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
  ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
}

/* Remove allocnos from loops removed from the allocation
   consideration.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     an upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of the allocno
		     in the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocnos into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  ira_remove_allocno_prefs (a);
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in the regno allocno list.  */
	{
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}

/* Remove allocnos from all loops but the root.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of the allocno in the
	 upper region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	{
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	}
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}

/* Remove loops from consideration.  We remove all loops except for
   the root if ALL_P, or loops for which a separate allocation will
   not improve the result.  We have to do this after allocno creation
   and the evaluation of allocno costs and classes, because only after
   that can the register pressure be known and calculated.  */
static void
remove_unnecessary_regions (bool all_p)
{
  if (current_loops == NULL)
    return;
  if (all_p)
    mark_all_loops_for_removal ();
  else
    mark_loops_for_removal ();
  children_vec.create (last_basic_block + number_of_loops (cfun));
  removed_loop_vec.create (last_basic_block + number_of_loops (cfun));
  remove_unnecessary_loop_nodes_from_loop_tree (ira_loop_tree_root);
  children_vec.release ();
  if (all_p)
    remove_low_level_allocnos ();
  else
    remove_unnecessary_allocnos ();
  while (removed_loop_vec.length () > 0)
    finish_loop_tree_node (removed_loop_vec.pop ());
  removed_loop_vec.release ();
}


/* At this point a true value of the allocno attribute bad_spill_p
   means that there is an insn where the allocno occurs and where the
   allocno cannot be used as memory.  The function updates the
   attribute so that it remains true only for allocnos which cannot be
   used as memory in an insn and in whose live ranges other allocnos
   die.  Spilling an allocno with a true value will not improve the
   code, because it will not make other allocnos colorable, and
   additional reloads for the corresponding pseudo will be generated
   by the reload pass for each insn in which it occurs.

   This is a trick mentioned in the classic article of Chaitin et al.
   which is frequently omitted in other implementations of RA based on
   graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  if (r != NULL)
	    break;
	}
      if (r == NULL)
	ALLOCNO_BAD_SPILL_P (a) = false;
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
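
/* Illustrative sketch (not part of IRA; compiled out): the essence of
   the dead-points test above on a plain array.  An allocno keeps its
   bad-spill mark only if some other allocno of the same class dies
   strictly inside one of its ranges; otherwise spilling it cannot
   make other allocnos colorable.  All names here are hypothetical.  */
#if 0
/* Return true iff DEAD_POINT[p] is set for some p with
   START < p < FINISH, i.e. another allocno dies inside the range.  */
static bool
keep_bad_spill_p (const bool *dead_point, int start, int finish)
{
  int p;

  for (p = start + 1; p < finish; p++)
    if (dead_point[p])
      return true;   /* Another allocno dies inside the range.  */
  return false;      /* Spilling frees nothing new: clear the mark.  */
}
#endif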

/* Set up minimal and maximal live range points for allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      continue;
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		for (cap = ALLOCNO_CAP (a);
		     cap != NULL;
		     cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
	  && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}

/* Sort allocnos according to their live ranges.  Allocnos are ordered
   by their range start point (min); allocnos with the same start are
   ordered by their range finish point (max), and remaining ties are
   broken by allocno number to keep the qsort result deterministic.  */
static int
object_range_compare_func (const void *v1p, const void *v2p)
{
  int diff;
  ira_object_t obj1 = *(const ira_object_t *) v1p;
  ira_object_t obj2 = *(const ira_object_t *) v2p;
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);

  if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
    return diff;
  if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
    return diff;
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}

/* Sort ira_object_id_map and set up conflict ids of allocnos.  */
static void
sort_conflict_id_map (void)
{
  int i, num;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	ira_object_id_map[num++] = obj;
    }
  qsort (ira_object_id_map, num, sizeof (ira_object_t),
	 object_range_compare_func);
  for (i = 0; i < num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      gcc_assert (obj != NULL);
      OBJECT_CONFLICT_ID (obj) = i;
    }
  for (i = num; i < ira_objects_num; i++)
    ira_object_id_map[i] = NULL;
}

/* Set up minimal and maximal conflict ids of allocnos with which a
   given allocno can conflict.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocnos with smaller ids will
	     also be skipped because of the secondary sorting by the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min = i - 1;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* When filling last_lived, we can go beyond the range finish of
	 the current object to recognize intersections quickly: if the
	 finish of a subsequently processed object's range (such an
	 object has a smaller conflict id) lies beyond this object's
	 range finish, the two ranges definitely intersect, because
	 objects with bigger conflict ids have range starts no smaller
	 than objects with smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra
     conflicts in subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
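
/* Illustrative worked example (not part of IRA): suppose three objects
   get conflict ids and program points

     id 0: [1, 4]   id 1: [3, 8]   id 2: [6, 9]

   In the forward pass, every object whose max lies below id 2's start
   point 6 is skipped, so OBJECT_MIN of id 2 becomes 1 (id 1 is the
   first not yet finished object).  In the backward pass, last_lived[]
   records for each program point the biggest conflict id still live
   there, so OBJECT_MAX of id 0 becomes last_lived[4] = 1.  The
   conflict bit vectors then only need to cover [OBJECT_MIN,
   OBJECT_MAX] instead of all conflict ids.  */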

/* Create caps for allocnos that live entirely inside non-root loop
   tree nodes, i.e. are not passed through the loop border.  */
static void
create_caps (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	create_cap_allocno (a);
      else if (ALLOCNO_CAP (a) == NULL)
	{
	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
	    create_cap_allocno (a);
	}
    }
}


/* This page contains code transforming a multi-region internal
   representation (IR) into a one-region IR, which is necessary for
   reload.  This transformation is called IR flattening.  We might
   just rebuild the IR for one region, but we don't do it because it
   takes a lot of time.  */

/* Map: regno -> allocno which will finally represent the regno for
   the IR with one region.  */
static ira_allocno_t *regno_top_level_allocno_map;

/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
ira_allocno_t
ira_parent_allocno (ira_allocno_t a)
{
  ira_loop_tree_node_t parent;

  if (ALLOCNO_CAP (a) != NULL)
    return NULL;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  if (parent == NULL)
    return NULL;

  return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
}

/* Find the allocno that corresponds to A at a level one higher up in the
   loop tree.  If ALLOCNO_CAP is set for A, return that.  */
ira_allocno_t
ira_parent_or_cap_allocno (ira_allocno_t a)
{
  if (ALLOCNO_CAP (a) != NULL)
    return ALLOCNO_CAP (a);

  return ira_parent_allocno (a);
}

/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied any live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}

/* Flatten the IR.  In other words, this function transforms IR as if
   it were built with one region (without loops).  We could make it
   much simpler by rebuilding IR with one region, but unfortunately it
   takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
   IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
   IRA_MAX_POINT before emitting insns on the loop borders.  */
void
ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
{
  int i, j;
  bool keep_p;
  int hard_regs_num;
  bool new_pseudos_p, merged_p, mem_dest_p;
  unsigned int n;
  enum reg_class aclass;
  ira_allocno_t a, parent_a, first, second, node_first, node_second;
  ira_copy_t cp;
  ira_loop_tree_node_t node;
  live_range_t r;
  ira_allocno_iterator ai;
  ira_copy_iterator ci;

  regno_top_level_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (regno_top_level_allocno_map, 0,
	  max_reg_num () * sizeof (ira_allocno_t));
  new_pseudos_p = merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      ira_allocno_object_iterator oi;
      ira_object_t obj;

      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps and they will never
	   be transformed into allocnos existing after IR
	   flattening.  */
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			   OBJECT_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
#endif
    }
  /* Fix final allocno attributes.  */
  for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    {
      mem_dest_p = false;
      for (a = ira_regno_allocno_map[i];
	   a != NULL;
	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
	{
	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);

	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  if (data->somewhere_renamed_p)
	    new_pseudos_p = true;
	  parent_a = ira_parent_allocno (a);
	  if (parent_a == NULL)
	    {
	      ALLOCNO_COPIES (a) = NULL;
	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
	      continue;
	    }
	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);

	  if (data->mem_optimized_dest != NULL)
	    mem_dest_p = true;
	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
	  if (REGNO (data->reg) == REGNO (parent_data->reg))
	    {
	      merge_hard_reg_conflicts (a, parent_a, true);
	      move_allocno_live_ranges (a, parent_a);
	      merged_p = true;
	      parent_data->mem_optimized_dest_p
		= (parent_data->mem_optimized_dest_p
		   || data->mem_optimized_dest_p);
	      continue;
	    }
	  new_pseudos_p = true;
	  for (;;)
	    {
	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CALLS_CROSSED_NUM (a);
	      ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a)
		-= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a);
	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
			  && ALLOCNO_NREFS (parent_a) >= 0
			  && ALLOCNO_FREQ (parent_a) >= 0);
	      aclass = ALLOCNO_CLASS (parent_a);
	      hard_regs_num = ira_class_hard_regs_num[aclass];
	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
		for (j = 0; j < hard_regs_num; j++)
		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
	      ALLOCNO_CLASS_COST (parent_a)
		-= ALLOCNO_CLASS_COST (a);
	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
	      parent_a = ira_parent_allocno (parent_a);
	      if (parent_a == NULL)
		break;
	    }
	  ALLOCNO_COPIES (a) = NULL;
	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
	}
      if (mem_dest_p && copy_info_to_removed_store_destinations (i))
	merged_p = true;
    }
  ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
  if (merged_p || ira_max_point_before_emit != ira_max_point)
    ira_rebuild_start_finish_chains ();
  if (new_pseudos_p)
    {
      sparseset objects_live;

      /* Rebuild conflicts.  */
      FOR_EACH_ALLOCNO (a, ai)
	{
	  ira_allocno_object_iterator oi;
	  ira_object_t obj;

	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	      || ALLOCNO_CAP_MEMBER (a) != NULL)
	    continue;
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    {
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		ira_assert (r->object == obj);
	      clear_conflicts (obj);
	    }
	}
      objects_live = sparseset_alloc (ira_objects_num);
      for (i = 0; i < ira_max_point; i++)
	{
	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	    {
	      ira_object_t obj = r->object;

	      a = OBJECT_ALLOCNO (obj);
	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
		  || ALLOCNO_CAP_MEMBER (a) != NULL)
		continue;

	      aclass = ALLOCNO_CLASS (a);
	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
		{
		  ira_object_t live_obj = ira_object_id_map[n];
		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
		      /* Don't set up conflict for the allocno with itself.  */
		      && live_a != a)
		    ira_add_conflict (obj, live_obj);
		}
	    }

	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
	}
      sparseset_free (objects_live);
      compress_conflict_vecs ();
    }
  /* Mark some copies for removal and change allocnos in the
     remaining copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->first),
	       REGNO (allocno_emit_reg (cp->first)),
	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
	       ALLOCNO_NUM (cp->second),
	       REGNO (allocno_emit_reg (cp->second)));
	  cp->loop_tree_node = NULL;
	  continue;
	}
      first
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
      second
	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
      node = cp->loop_tree_node;
      if (node == NULL)
	keep_p = true; /* It is a copy generated in ira-emit.c.  */
      else
	{
	  /* Check that the copy was not propagated from a level on
	     which we will have different pseudos.  */
	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
	  keep_p = ((REGNO (allocno_emit_reg (first))
		     == REGNO (allocno_emit_reg (node_first)))
		    && (REGNO (allocno_emit_reg (second))
			== REGNO (allocno_emit_reg (node_second))));
	}
      if (keep_p)
	{
	  cp->loop_tree_node = ira_loop_tree_root;
	  cp->first = first;
	  cp->second = second;
	}
      else
	{
	  cp->loop_tree_node = NULL;
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
		     cp->num, ALLOCNO_NUM (cp->first),
		     REGNO (allocno_emit_reg (cp->first)),
		     ALLOCNO_NUM (cp->second),
		     REGNO (allocno_emit_reg (cp->second)));
	}
    }
  /* Remove unnecessary allocnos on lower levels of the loop tree.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
	  || ALLOCNO_CAP_MEMBER (a) != NULL)
	{
	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
	  ira_remove_allocno_prefs (a);
	  finish_allocno (a);
	  continue;
	}
      ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
      ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
      ALLOCNO_CAP (a) = NULL;
      /* Restore updated costs for assignments from reload.  */
      ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
      ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
      if (! ALLOCNO_ASSIGNED_P (a))
	ira_free_allocno_updated_costs (a);
      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
    }
  /* Remove unnecessary copies.  */
  FOR_EACH_COPY (cp, ci)
    {
      if (cp->loop_tree_node == NULL)
	{
	  ira_copies[cp->num] = NULL;
	  finish_copy (cp);
	  continue;
	}
      ira_assert
	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
      add_allocno_copy_to_list (cp);
      swap_allocno_copy_ends_if_necessary (cp);
    }
  rebuild_regno_allocno_maps ();
  if (ira_max_point != ira_max_point_before_emit)
    ira_compress_allocno_live_ranges ();
  ira_free (regno_top_level_allocno_map);
}


#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif

/* Identify allocnos which prefer a register class with a single hard register.
   Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
   less likely to use the preferred singleton register.  */
static void
update_conflict_hard_reg_costs (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  int i, index, min;

  FOR_EACH_ALLOCNO (a, ai)
    {
      reg_class_t aclass = ALLOCNO_CLASS (a);
      reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
      int singleton = ira_class_singleton[pref][ALLOCNO_MODE (a)];
      if (singleton < 0)
	continue;
      index = ira_class_hard_reg_index[(int) aclass][singleton];
      if (index < 0)
	continue;
      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
	continue;
      min = INT_MAX;
      for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
      if (min == INT_MAX)
	continue;
      ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				  aclass, 0);
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
	-= min - ALLOCNO_CLASS_COST (a);
    }
}
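
/* Illustrative arithmetic (not part of IRA): suppose the allocno's
   class cost is 2, its preferred class has exactly one hard register
   R, and the cheapest cost of any strictly worse hard register in its
   class is 9.  Then min - ALLOCNO_CLASS_COST (a) = 9 - 2 = 7, and R's
   entry in the allocno's conflict cost vector is decreased by 7 -- the
   margin the allocno would lose if it were denied R -- which biases
   conflicting allocnos away from R by the same amount.  */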

/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate a loop
   structure (besides the nodes representing the whole function and
   the basic blocks) for regional allocation.  A true return means
   that we really need to flatten the IR before reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_prefs ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Remove all regions but the root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocnos crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_prefs (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : number_of_loops (cfun),
	       n_basic_blocks_for_fn (cfun), ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}

/* Release the data created by function ira_build.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_prefs ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}