1 /* Building internal representation for IRA.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
32 #include "insn-config.h"
34 #include "diagnostic-core.h"
37 #include "sparseset.h"
40 #include "alloc-pool.h"
42 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
44 static ira_copy_t
find_allocno_copy (ira_allocno_t
, ira_allocno_t
, rtx_insn
*,
45 ira_loop_tree_node_t
);
47 /* The root of the loop tree corresponding to the all function. */
48 ira_loop_tree_node_t ira_loop_tree_root
;
50 /* Height of the loop tree. */
51 int ira_loop_tree_height
;
53 /* All nodes representing basic blocks are referred through the
54 following array. We can not use basic block member `aux' for this
55 because it is used for insertion of insns on edges. */
56 ira_loop_tree_node_t ira_bb_nodes
;
58 /* All nodes representing loops are referred through the following
60 ira_loop_tree_node_t ira_loop_nodes
;
62 /* And size of the ira_loop_nodes array. */
63 unsigned int ira_loop_nodes_count
;
65 /* Map regno -> allocnos with given regno (see comments for
66 allocno member `next_regno_allocno'). */
67 ira_allocno_t
*ira_regno_allocno_map
;
69 /* Array of references to all allocnos. The order number of the
70 allocno corresponds to the index in the array. Removed allocnos
71 have NULL element value. */
72 ira_allocno_t
*ira_allocnos
;
74 /* Sizes of the previous array. */
77 /* Count of conflict record structures we've created, used when creating
81 /* Map a conflict id to its conflict record. */
82 ira_object_t
*ira_object_id_map
;
84 /* Array of references to all allocno preferences. The order number
85 of the preference corresponds to the index in the array. */
86 ira_pref_t
*ira_prefs
;
88 /* Size of the previous array. */
91 /* Array of references to all copies. The order number of the copy
92 corresponds to the index in the array. Removed copies have NULL
94 ira_copy_t
*ira_copies
;
96 /* Size of the previous array. */
101 /* LAST_BASIC_BLOCK before generating additional insns because of live
102 range splitting. Emitting insns on a critical edge creates a new
104 static int last_basic_block_before_change
;
106 /* Initialize some members in loop tree node NODE. Use LOOP_NUM for
107 the member loop_num. */
109 init_loop_tree_node (struct ira_loop_tree_node
*node
, int loop_num
)
111 int max_regno
= max_reg_num ();
113 node
->regno_allocno_map
114 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
) * max_regno
);
115 memset (node
->regno_allocno_map
, 0, sizeof (ira_allocno_t
) * max_regno
);
116 memset (node
->reg_pressure
, 0, sizeof (node
->reg_pressure
));
117 node
->all_allocnos
= ira_allocate_bitmap ();
118 node
->modified_regnos
= ira_allocate_bitmap ();
119 node
->border_allocnos
= ira_allocate_bitmap ();
120 node
->local_copies
= ira_allocate_bitmap ();
121 node
->loop_num
= loop_num
;
122 node
->children
= NULL
;
123 node
->subloops
= NULL
;
127 /* The following function allocates the loop tree nodes. If
128 CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
129 the root which corresponds the all function) will be not allocated
130 but nodes will still be allocated for basic blocks. */
132 create_loop_tree_nodes (void)
142 = ((struct ira_loop_tree_node
*)
143 ira_allocate (sizeof (struct ira_loop_tree_node
)
144 * last_basic_block_for_fn (cfun
)));
145 last_basic_block_before_change
= last_basic_block_for_fn (cfun
);
146 for (i
= 0; i
< (unsigned int) last_basic_block_for_fn (cfun
); i
++)
148 ira_bb_nodes
[i
].regno_allocno_map
= NULL
;
149 memset (ira_bb_nodes
[i
].reg_pressure
, 0,
150 sizeof (ira_bb_nodes
[i
].reg_pressure
));
151 ira_bb_nodes
[i
].all_allocnos
= NULL
;
152 ira_bb_nodes
[i
].modified_regnos
= NULL
;
153 ira_bb_nodes
[i
].border_allocnos
= NULL
;
154 ira_bb_nodes
[i
].local_copies
= NULL
;
156 if (current_loops
== NULL
)
158 ira_loop_nodes_count
= 1;
159 ira_loop_nodes
= ((struct ira_loop_tree_node
*)
160 ira_allocate (sizeof (struct ira_loop_tree_node
)));
161 init_loop_tree_node (ira_loop_nodes
, 0);
164 ira_loop_nodes_count
= number_of_loops (cfun
);
165 ira_loop_nodes
= ((struct ira_loop_tree_node
*)
166 ira_allocate (sizeof (struct ira_loop_tree_node
)
167 * ira_loop_nodes_count
));
168 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), i
, loop
)
170 if (loop_outer (loop
) != NULL
)
172 ira_loop_nodes
[i
].regno_allocno_map
= NULL
;
174 FOR_EACH_EDGE (e
, ei
, loop
->header
->preds
)
175 if (e
->src
!= loop
->latch
176 && (e
->flags
& EDGE_ABNORMAL
) && EDGE_CRITICAL_P (e
))
183 edges
= get_loop_exit_edges (loop
);
184 FOR_EACH_VEC_ELT (edges
, j
, e
)
185 if ((e
->flags
& EDGE_ABNORMAL
) && EDGE_CRITICAL_P (e
))
194 init_loop_tree_node (&ira_loop_nodes
[i
], loop
->num
);
198 /* The function returns TRUE if there are more one allocation
201 more_one_region_p (void)
206 if (current_loops
!= NULL
)
207 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), i
, loop
)
208 if (ira_loop_nodes
[i
].regno_allocno_map
!= NULL
209 && ira_loop_tree_root
!= &ira_loop_nodes
[i
])
214 /* Free the loop tree node of a loop. */
216 finish_loop_tree_node (ira_loop_tree_node_t loop
)
218 if (loop
->regno_allocno_map
!= NULL
)
220 ira_assert (loop
->bb
== NULL
);
221 ira_free_bitmap (loop
->local_copies
);
222 ira_free_bitmap (loop
->border_allocnos
);
223 ira_free_bitmap (loop
->modified_regnos
);
224 ira_free_bitmap (loop
->all_allocnos
);
225 ira_free (loop
->regno_allocno_map
);
226 loop
->regno_allocno_map
= NULL
;
230 /* Free the loop tree nodes. */
232 finish_loop_tree_nodes (void)
236 for (i
= 0; i
< ira_loop_nodes_count
; i
++)
237 finish_loop_tree_node (&ira_loop_nodes
[i
]);
238 ira_free (ira_loop_nodes
);
239 for (i
= 0; i
< (unsigned int) last_basic_block_before_change
; i
++)
241 if (ira_bb_nodes
[i
].local_copies
!= NULL
)
242 ira_free_bitmap (ira_bb_nodes
[i
].local_copies
);
243 if (ira_bb_nodes
[i
].border_allocnos
!= NULL
)
244 ira_free_bitmap (ira_bb_nodes
[i
].border_allocnos
);
245 if (ira_bb_nodes
[i
].modified_regnos
!= NULL
)
246 ira_free_bitmap (ira_bb_nodes
[i
].modified_regnos
);
247 if (ira_bb_nodes
[i
].all_allocnos
!= NULL
)
248 ira_free_bitmap (ira_bb_nodes
[i
].all_allocnos
);
249 if (ira_bb_nodes
[i
].regno_allocno_map
!= NULL
)
250 ira_free (ira_bb_nodes
[i
].regno_allocno_map
);
252 ira_free (ira_bb_nodes
);
257 /* The following recursive function adds LOOP to the loop tree
258 hierarchy. LOOP is added only once. If LOOP is NULL we adding
259 loop designating the whole function when CFG loops are not
262 add_loop_to_tree (struct loop
*loop
)
266 ira_loop_tree_node_t loop_node
, parent_node
;
268 /* We can not use loop node access macros here because of potential
269 checking and because the nodes are not initialized enough
271 if (loop
!= NULL
&& loop_outer (loop
) != NULL
)
272 add_loop_to_tree (loop_outer (loop
));
273 loop_num
= loop
!= NULL
? loop
->num
: 0;
274 if (ira_loop_nodes
[loop_num
].regno_allocno_map
!= NULL
275 && ira_loop_nodes
[loop_num
].children
== NULL
)
277 /* We have not added loop node to the tree yet. */
278 loop_node
= &ira_loop_nodes
[loop_num
];
279 loop_node
->loop
= loop
;
280 loop_node
->bb
= NULL
;
285 for (parent
= loop_outer (loop
);
287 parent
= loop_outer (parent
))
288 if (ira_loop_nodes
[parent
->num
].regno_allocno_map
!= NULL
)
293 loop_node
->next
= NULL
;
294 loop_node
->subloop_next
= NULL
;
295 loop_node
->parent
= NULL
;
299 parent_node
= &ira_loop_nodes
[parent
->num
];
300 loop_node
->next
= parent_node
->children
;
301 parent_node
->children
= loop_node
;
302 loop_node
->subloop_next
= parent_node
->subloops
;
303 parent_node
->subloops
= loop_node
;
304 loop_node
->parent
= parent_node
;
309 /* The following recursive function sets up levels of nodes of the
310 tree given its root LOOP_NODE. The enumeration starts with LEVEL.
311 The function returns maximal value of level in the tree + 1. */
313 setup_loop_tree_level (ira_loop_tree_node_t loop_node
, int level
)
315 int height
, max_height
;
316 ira_loop_tree_node_t subloop_node
;
318 ira_assert (loop_node
->bb
== NULL
);
319 loop_node
->level
= level
;
320 max_height
= level
+ 1;
321 for (subloop_node
= loop_node
->subloops
;
322 subloop_node
!= NULL
;
323 subloop_node
= subloop_node
->subloop_next
)
325 ira_assert (subloop_node
->bb
== NULL
);
326 height
= setup_loop_tree_level (subloop_node
, level
+ 1);
327 if (height
> max_height
)
333 /* Create the loop tree. The algorithm is designed to provide correct
334 order of loops (they are ordered by their last loop BB) and basic
335 blocks in the chain formed by member next. */
337 form_loop_tree (void)
341 ira_loop_tree_node_t bb_node
, loop_node
;
343 /* We can not use loop/bb node access macros because of potential
344 checking and because the nodes are not initialized enough
346 FOR_EACH_BB_FN (bb
, cfun
)
348 bb_node
= &ira_bb_nodes
[bb
->index
];
350 bb_node
->loop
= NULL
;
351 bb_node
->subloops
= NULL
;
352 bb_node
->children
= NULL
;
353 bb_node
->subloop_next
= NULL
;
354 bb_node
->next
= NULL
;
355 if (current_loops
== NULL
)
359 for (parent
= bb
->loop_father
;
361 parent
= loop_outer (parent
))
362 if (ira_loop_nodes
[parent
->num
].regno_allocno_map
!= NULL
)
365 add_loop_to_tree (parent
);
366 loop_node
= &ira_loop_nodes
[parent
== NULL
? 0 : parent
->num
];
367 bb_node
->next
= loop_node
->children
;
368 bb_node
->parent
= loop_node
;
369 loop_node
->children
= bb_node
;
371 ira_loop_tree_root
= IRA_LOOP_NODE_BY_INDEX (0);
372 ira_loop_tree_height
= setup_loop_tree_level (ira_loop_tree_root
, 0);
373 ira_assert (ira_loop_tree_root
->regno_allocno_map
!= NULL
);
378 /* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
381 rebuild_regno_allocno_maps (void)
384 int max_regno
, regno
;
386 ira_loop_tree_node_t loop_tree_node
;
388 ira_allocno_iterator ai
;
390 ira_assert (current_loops
!= NULL
);
391 max_regno
= max_reg_num ();
392 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), l
, loop
)
393 if (ira_loop_nodes
[l
].regno_allocno_map
!= NULL
)
395 ira_free (ira_loop_nodes
[l
].regno_allocno_map
);
396 ira_loop_nodes
[l
].regno_allocno_map
397 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
399 memset (ira_loop_nodes
[l
].regno_allocno_map
, 0,
400 sizeof (ira_allocno_t
) * max_regno
);
402 ira_free (ira_regno_allocno_map
);
403 ira_regno_allocno_map
404 = (ira_allocno_t
*) ira_allocate (max_regno
* sizeof (ira_allocno_t
));
405 memset (ira_regno_allocno_map
, 0, max_regno
* sizeof (ira_allocno_t
));
406 FOR_EACH_ALLOCNO (a
, ai
)
408 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
409 /* Caps are not in the regno allocno maps. */
411 regno
= ALLOCNO_REGNO (a
);
412 loop_tree_node
= ALLOCNO_LOOP_TREE_NODE (a
);
413 ALLOCNO_NEXT_REGNO_ALLOCNO (a
) = ira_regno_allocno_map
[regno
];
414 ira_regno_allocno_map
[regno
] = a
;
415 if (loop_tree_node
->regno_allocno_map
[regno
] == NULL
)
416 /* Remember that we can create temporary allocnos to break
417 cycles in register shuffle. */
418 loop_tree_node
->regno_allocno_map
[regno
] = a
;
423 /* Pools for allocnos, allocno live ranges and objects. */
424 static object_allocator
<live_range
> live_range_pool ("live ranges", 100);
425 static object_allocator
<ira_allocno
> allocno_pool ("allocnos", 100);
426 static object_allocator
<ira_object
> object_pool ("objects", 100);
428 /* Vec containing references to all created allocnos. It is a
429 container of array allocnos. */
430 static vec
<ira_allocno_t
> allocno_vec
;
432 /* Vec containing references to all created ira_objects. It is a
433 container of ira_object_id_map. */
434 static vec
<ira_object_t
> ira_object_id_map_vec
;
436 /* Initialize data concerning allocnos. */
438 initiate_allocnos (void)
440 allocno_vec
.create (max_reg_num () * 2);
442 ira_allocnos_num
= 0;
444 ira_object_id_map_vec
.create (max_reg_num () * 2);
445 ira_object_id_map
= NULL
;
446 ira_regno_allocno_map
447 = (ira_allocno_t
*) ira_allocate (max_reg_num ()
448 * sizeof (ira_allocno_t
));
449 memset (ira_regno_allocno_map
, 0, max_reg_num () * sizeof (ira_allocno_t
));
452 /* Create and return an object corresponding to a new allocno A. */
454 ira_create_object (ira_allocno_t a
, int subword
)
456 enum reg_class aclass
= ALLOCNO_CLASS (a
);
457 ira_object_t obj
= object_pool
.allocate ();
459 OBJECT_ALLOCNO (obj
) = a
;
460 OBJECT_SUBWORD (obj
) = subword
;
461 OBJECT_CONFLICT_ID (obj
) = ira_objects_num
;
462 OBJECT_CONFLICT_VEC_P (obj
) = false;
463 OBJECT_CONFLICT_ARRAY (obj
) = NULL
;
464 OBJECT_NUM_CONFLICTS (obj
) = 0;
465 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj
), ira_no_alloc_regs
);
466 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
), ira_no_alloc_regs
);
467 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj
),
468 reg_class_contents
[aclass
]);
469 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
470 reg_class_contents
[aclass
]);
471 OBJECT_MIN (obj
) = INT_MAX
;
472 OBJECT_MAX (obj
) = -1;
473 OBJECT_LIVE_RANGES (obj
) = NULL
;
475 ira_object_id_map_vec
.safe_push (obj
);
477 = ira_object_id_map_vec
.address ();
478 ira_objects_num
= ira_object_id_map_vec
.length ();
483 /* Create and return the allocno corresponding to REGNO in
484 LOOP_TREE_NODE. Add the allocno to the list of allocnos with the
485 same regno if CAP_P is FALSE. */
487 ira_create_allocno (int regno
, bool cap_p
,
488 ira_loop_tree_node_t loop_tree_node
)
492 a
= allocno_pool
.allocate ();
493 ALLOCNO_REGNO (a
) = regno
;
494 ALLOCNO_LOOP_TREE_NODE (a
) = loop_tree_node
;
497 ALLOCNO_NEXT_REGNO_ALLOCNO (a
) = ira_regno_allocno_map
[regno
];
498 ira_regno_allocno_map
[regno
] = a
;
499 if (loop_tree_node
->regno_allocno_map
[regno
] == NULL
)
500 /* Remember that we can create temporary allocnos to break
501 cycles in register shuffle on region borders (see
503 loop_tree_node
->regno_allocno_map
[regno
] = a
;
505 ALLOCNO_CAP (a
) = NULL
;
506 ALLOCNO_CAP_MEMBER (a
) = NULL
;
507 ALLOCNO_NUM (a
) = ira_allocnos_num
;
508 bitmap_set_bit (loop_tree_node
->all_allocnos
, ALLOCNO_NUM (a
));
509 ALLOCNO_NREFS (a
) = 0;
510 ALLOCNO_FREQ (a
) = 0;
511 ALLOCNO_HARD_REGNO (a
) = -1;
512 ALLOCNO_CALL_FREQ (a
) = 0;
513 ALLOCNO_CALLS_CROSSED_NUM (a
) = 0;
514 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
) = 0;
515 CLEAR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
));
517 ALLOCNO_NO_STACK_REG_P (a
) = false;
518 ALLOCNO_TOTAL_NO_STACK_REG_P (a
) = false;
520 ALLOCNO_DONT_REASSIGN_P (a
) = false;
521 ALLOCNO_BAD_SPILL_P (a
) = false;
522 ALLOCNO_ASSIGNED_P (a
) = false;
523 ALLOCNO_MODE (a
) = (regno
< 0 ? VOIDmode
: PSEUDO_REGNO_MODE (regno
));
524 ALLOCNO_WMODE (a
) = ALLOCNO_MODE (a
);
525 ALLOCNO_PREFS (a
) = NULL
;
526 ALLOCNO_COPIES (a
) = NULL
;
527 ALLOCNO_HARD_REG_COSTS (a
) = NULL
;
528 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
529 ALLOCNO_UPDATED_HARD_REG_COSTS (a
) = NULL
;
530 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
531 ALLOCNO_CLASS (a
) = NO_REGS
;
532 ALLOCNO_UPDATED_CLASS_COST (a
) = 0;
533 ALLOCNO_CLASS_COST (a
) = 0;
534 ALLOCNO_MEMORY_COST (a
) = 0;
535 ALLOCNO_UPDATED_MEMORY_COST (a
) = 0;
536 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
) = 0;
537 ALLOCNO_NUM_OBJECTS (a
) = 0;
539 ALLOCNO_ADD_DATA (a
) = NULL
;
540 allocno_vec
.safe_push (a
);
541 ira_allocnos
= allocno_vec
.address ();
542 ira_allocnos_num
= allocno_vec
.length ();
547 /* Set up register class for A and update its conflict hard
550 ira_set_allocno_class (ira_allocno_t a
, enum reg_class aclass
)
552 ira_allocno_object_iterator oi
;
555 ALLOCNO_CLASS (a
) = aclass
;
556 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
558 IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj
),
559 reg_class_contents
[aclass
]);
560 IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
561 reg_class_contents
[aclass
]);
565 /* Determine the number of objects we should associate with allocno A
566 and allocate them. */
568 ira_create_allocno_objects (ira_allocno_t a
)
570 machine_mode mode
= ALLOCNO_MODE (a
);
571 enum reg_class aclass
= ALLOCNO_CLASS (a
);
572 int n
= ira_reg_class_max_nregs
[aclass
][mode
];
575 if (GET_MODE_SIZE (mode
) != 2 * UNITS_PER_WORD
|| n
!= 2)
578 ALLOCNO_NUM_OBJECTS (a
) = n
;
579 for (i
= 0; i
< n
; i
++)
580 ALLOCNO_OBJECT (a
, i
) = ira_create_object (a
, i
);
583 /* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
584 ALLOCNO_OBJECT structures. This must be called after the allocno
585 classes are known. */
587 create_allocno_objects (void)
590 ira_allocno_iterator ai
;
592 FOR_EACH_ALLOCNO (a
, ai
)
593 ira_create_allocno_objects (a
);
596 /* Merge hard register conflict information for all objects associated with
597 allocno TO into the corresponding objects associated with FROM.
598 If TOTAL_ONLY is true, we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS. */
600 merge_hard_reg_conflicts (ira_allocno_t from
, ira_allocno_t to
,
604 gcc_assert (ALLOCNO_NUM_OBJECTS (to
) == ALLOCNO_NUM_OBJECTS (from
));
605 for (i
= 0; i
< ALLOCNO_NUM_OBJECTS (to
); i
++)
607 ira_object_t from_obj
= ALLOCNO_OBJECT (from
, i
);
608 ira_object_t to_obj
= ALLOCNO_OBJECT (to
, i
);
611 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj
),
612 OBJECT_CONFLICT_HARD_REGS (from_obj
));
613 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj
),
614 OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj
));
617 if (!total_only
&& ALLOCNO_NO_STACK_REG_P (from
))
618 ALLOCNO_NO_STACK_REG_P (to
) = true;
619 if (ALLOCNO_TOTAL_NO_STACK_REG_P (from
))
620 ALLOCNO_TOTAL_NO_STACK_REG_P (to
) = true;
624 /* Update hard register conflict information for all objects associated with
625 A to include the regs in SET. */
627 ior_hard_reg_conflicts (ira_allocno_t a
, HARD_REG_SET
*set
)
629 ira_allocno_object_iterator i
;
632 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, i
)
634 IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj
), *set
);
635 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
), *set
);
639 /* Return TRUE if a conflict vector with NUM elements is more
640 profitable than a conflict bit vector for OBJ. */
642 ira_conflict_vector_profitable_p (ira_object_t obj
, int num
)
645 int max
= OBJECT_MAX (obj
);
646 int min
= OBJECT_MIN (obj
);
649 /* We prefer a bit vector in such case because it does not result
653 nw
= (max
- min
+ IRA_INT_BITS
) / IRA_INT_BITS
;
654 return (2 * sizeof (ira_object_t
) * (num
+ 1)
655 < 3 * nw
* sizeof (IRA_INT_TYPE
));
658 /* Allocates and initialize the conflict vector of OBJ for NUM
659 conflicting objects. */
661 ira_allocate_conflict_vec (ira_object_t obj
, int num
)
666 ira_assert (OBJECT_CONFLICT_ARRAY (obj
) == NULL
);
667 num
++; /* for NULL end marker */
668 size
= sizeof (ira_object_t
) * num
;
669 OBJECT_CONFLICT_ARRAY (obj
) = ira_allocate (size
);
670 vec
= (ira_object_t
*) OBJECT_CONFLICT_ARRAY (obj
);
672 OBJECT_NUM_CONFLICTS (obj
) = 0;
673 OBJECT_CONFLICT_ARRAY_SIZE (obj
) = size
;
674 OBJECT_CONFLICT_VEC_P (obj
) = true;
677 /* Allocate and initialize the conflict bit vector of OBJ. */
679 allocate_conflict_bit_vec (ira_object_t obj
)
683 ira_assert (OBJECT_CONFLICT_ARRAY (obj
) == NULL
);
684 size
= ((OBJECT_MAX (obj
) - OBJECT_MIN (obj
) + IRA_INT_BITS
)
685 / IRA_INT_BITS
* sizeof (IRA_INT_TYPE
));
686 OBJECT_CONFLICT_ARRAY (obj
) = ira_allocate (size
);
687 memset (OBJECT_CONFLICT_ARRAY (obj
), 0, size
);
688 OBJECT_CONFLICT_ARRAY_SIZE (obj
) = size
;
689 OBJECT_CONFLICT_VEC_P (obj
) = false;
692 /* Allocate and initialize the conflict vector or conflict bit vector
693 of OBJ for NUM conflicting allocnos whatever is more profitable. */
695 ira_allocate_object_conflicts (ira_object_t obj
, int num
)
697 if (ira_conflict_vector_profitable_p (obj
, num
))
698 ira_allocate_conflict_vec (obj
, num
);
700 allocate_conflict_bit_vec (obj
);
703 /* Add OBJ2 to the conflicts of OBJ1. */
705 add_to_conflicts (ira_object_t obj1
, ira_object_t obj2
)
710 if (OBJECT_CONFLICT_VEC_P (obj1
))
712 ira_object_t
*vec
= OBJECT_CONFLICT_VEC (obj1
);
713 int curr_num
= OBJECT_NUM_CONFLICTS (obj1
);
715 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1
) < num
* sizeof (ira_object_t
))
717 ira_object_t
*newvec
;
718 size
= (3 * num
/ 2 + 1) * sizeof (ira_allocno_t
);
719 newvec
= (ira_object_t
*) ira_allocate (size
);
720 memcpy (newvec
, vec
, curr_num
* sizeof (ira_object_t
));
723 OBJECT_CONFLICT_ARRAY (obj1
) = vec
;
724 OBJECT_CONFLICT_ARRAY_SIZE (obj1
) = size
;
728 OBJECT_NUM_CONFLICTS (obj1
)++;
732 int nw
, added_head_nw
, id
;
733 IRA_INT_TYPE
*vec
= OBJECT_CONFLICT_BITVEC (obj1
);
735 id
= OBJECT_CONFLICT_ID (obj2
);
736 if (OBJECT_MIN (obj1
) > id
)
738 /* Expand head of the bit vector. */
739 added_head_nw
= (OBJECT_MIN (obj1
) - id
- 1) / IRA_INT_BITS
+ 1;
740 nw
= (OBJECT_MAX (obj1
) - OBJECT_MIN (obj1
)) / IRA_INT_BITS
+ 1;
741 size
= (nw
+ added_head_nw
) * sizeof (IRA_INT_TYPE
);
742 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1
) >= size
)
744 memmove ((char *) vec
+ added_head_nw
* sizeof (IRA_INT_TYPE
),
745 vec
, nw
* sizeof (IRA_INT_TYPE
));
746 memset (vec
, 0, added_head_nw
* sizeof (IRA_INT_TYPE
));
751 = (3 * (nw
+ added_head_nw
) / 2 + 1) * sizeof (IRA_INT_TYPE
);
752 vec
= (IRA_INT_TYPE
*) ira_allocate (size
);
753 memcpy ((char *) vec
+ added_head_nw
* sizeof (IRA_INT_TYPE
),
754 OBJECT_CONFLICT_ARRAY (obj1
), nw
* sizeof (IRA_INT_TYPE
));
755 memset (vec
, 0, added_head_nw
* sizeof (IRA_INT_TYPE
));
757 + (nw
+ added_head_nw
) * sizeof (IRA_INT_TYPE
),
758 0, size
- (nw
+ added_head_nw
) * sizeof (IRA_INT_TYPE
));
759 ira_free (OBJECT_CONFLICT_ARRAY (obj1
));
760 OBJECT_CONFLICT_ARRAY (obj1
) = vec
;
761 OBJECT_CONFLICT_ARRAY_SIZE (obj1
) = size
;
763 OBJECT_MIN (obj1
) -= added_head_nw
* IRA_INT_BITS
;
765 else if (OBJECT_MAX (obj1
) < id
)
767 nw
= (id
- OBJECT_MIN (obj1
)) / IRA_INT_BITS
+ 1;
768 size
= nw
* sizeof (IRA_INT_TYPE
);
769 if (OBJECT_CONFLICT_ARRAY_SIZE (obj1
) < size
)
771 /* Expand tail of the bit vector. */
772 size
= (3 * nw
/ 2 + 1) * sizeof (IRA_INT_TYPE
);
773 vec
= (IRA_INT_TYPE
*) ira_allocate (size
);
774 memcpy (vec
, OBJECT_CONFLICT_ARRAY (obj1
), OBJECT_CONFLICT_ARRAY_SIZE (obj1
));
775 memset ((char *) vec
+ OBJECT_CONFLICT_ARRAY_SIZE (obj1
),
776 0, size
- OBJECT_CONFLICT_ARRAY_SIZE (obj1
));
777 ira_free (OBJECT_CONFLICT_ARRAY (obj1
));
778 OBJECT_CONFLICT_ARRAY (obj1
) = vec
;
779 OBJECT_CONFLICT_ARRAY_SIZE (obj1
) = size
;
781 OBJECT_MAX (obj1
) = id
;
783 SET_MINMAX_SET_BIT (vec
, id
, OBJECT_MIN (obj1
), OBJECT_MAX (obj1
));
787 /* Add OBJ1 to the conflicts of OBJ2 and vice versa. */
789 ira_add_conflict (ira_object_t obj1
, ira_object_t obj2
)
791 add_to_conflicts (obj1
, obj2
);
792 add_to_conflicts (obj2
, obj1
);
795 /* Clear all conflicts of OBJ. */
797 clear_conflicts (ira_object_t obj
)
799 if (OBJECT_CONFLICT_VEC_P (obj
))
801 OBJECT_NUM_CONFLICTS (obj
) = 0;
802 OBJECT_CONFLICT_VEC (obj
)[0] = NULL
;
804 else if (OBJECT_CONFLICT_ARRAY_SIZE (obj
) != 0)
808 nw
= (OBJECT_MAX (obj
) - OBJECT_MIN (obj
)) / IRA_INT_BITS
+ 1;
809 memset (OBJECT_CONFLICT_BITVEC (obj
), 0, nw
* sizeof (IRA_INT_TYPE
));
813 /* The array used to find duplications in conflict vectors of
815 static int *conflict_check
;
817 /* The value used to mark allocation presence in conflict vector of
818 the current allocno. */
819 static int curr_conflict_check_tick
;
821 /* Remove duplications in conflict vector of OBJ. */
823 compress_conflict_vec (ira_object_t obj
)
825 ira_object_t
*vec
, conflict_obj
;
828 ira_assert (OBJECT_CONFLICT_VEC_P (obj
));
829 vec
= OBJECT_CONFLICT_VEC (obj
);
830 curr_conflict_check_tick
++;
831 for (i
= j
= 0; (conflict_obj
= vec
[i
]) != NULL
; i
++)
833 int id
= OBJECT_CONFLICT_ID (conflict_obj
);
834 if (conflict_check
[id
] != curr_conflict_check_tick
)
836 conflict_check
[id
] = curr_conflict_check_tick
;
837 vec
[j
++] = conflict_obj
;
840 OBJECT_NUM_CONFLICTS (obj
) = j
;
844 /* Remove duplications in conflict vectors of all allocnos. */
846 compress_conflict_vecs (void)
849 ira_object_iterator oi
;
851 conflict_check
= (int *) ira_allocate (sizeof (int) * ira_objects_num
);
852 memset (conflict_check
, 0, sizeof (int) * ira_objects_num
);
853 curr_conflict_check_tick
= 0;
854 FOR_EACH_OBJECT (obj
, oi
)
856 if (OBJECT_CONFLICT_VEC_P (obj
))
857 compress_conflict_vec (obj
);
859 ira_free (conflict_check
);
862 /* This recursive function outputs allocno A and if it is a cap the
863 function outputs its members. */
865 ira_print_expanded_allocno (ira_allocno_t a
)
869 fprintf (ira_dump_file
, " a%d(r%d", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
870 if ((bb
= ALLOCNO_LOOP_TREE_NODE (a
)->bb
) != NULL
)
871 fprintf (ira_dump_file
, ",b%d", bb
->index
);
873 fprintf (ira_dump_file
, ",l%d", ALLOCNO_LOOP_TREE_NODE (a
)->loop_num
);
874 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
876 fprintf (ira_dump_file
, ":");
877 ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a
));
879 fprintf (ira_dump_file
, ")");
882 /* Create and return the cap representing allocno A in the
885 create_cap_allocno (ira_allocno_t a
)
888 ira_loop_tree_node_t parent
;
889 enum reg_class aclass
;
891 parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
;
892 cap
= ira_create_allocno (ALLOCNO_REGNO (a
), true, parent
);
893 ALLOCNO_MODE (cap
) = ALLOCNO_MODE (a
);
894 ALLOCNO_WMODE (cap
) = ALLOCNO_WMODE (a
);
895 aclass
= ALLOCNO_CLASS (a
);
896 ira_set_allocno_class (cap
, aclass
);
897 ira_create_allocno_objects (cap
);
898 ALLOCNO_CAP_MEMBER (cap
) = a
;
899 ALLOCNO_CAP (a
) = cap
;
900 ALLOCNO_CLASS_COST (cap
) = ALLOCNO_CLASS_COST (a
);
901 ALLOCNO_MEMORY_COST (cap
) = ALLOCNO_MEMORY_COST (a
);
902 ira_allocate_and_copy_costs
903 (&ALLOCNO_HARD_REG_COSTS (cap
), aclass
, ALLOCNO_HARD_REG_COSTS (a
));
904 ira_allocate_and_copy_costs
905 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap
), aclass
,
906 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
));
907 ALLOCNO_BAD_SPILL_P (cap
) = ALLOCNO_BAD_SPILL_P (a
);
908 ALLOCNO_NREFS (cap
) = ALLOCNO_NREFS (a
);
909 ALLOCNO_FREQ (cap
) = ALLOCNO_FREQ (a
);
910 ALLOCNO_CALL_FREQ (cap
) = ALLOCNO_CALL_FREQ (a
);
912 merge_hard_reg_conflicts (a
, cap
, false);
914 ALLOCNO_CALLS_CROSSED_NUM (cap
) = ALLOCNO_CALLS_CROSSED_NUM (a
);
915 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (cap
) = ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
916 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (cap
),
917 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
));
918 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
920 fprintf (ira_dump_file
, " Creating cap ");
921 ira_print_expanded_allocno (cap
);
922 fprintf (ira_dump_file
, "\n");
927 /* Create and return a live range for OBJECT with given attributes. */
929 ira_create_live_range (ira_object_t obj
, int start
, int finish
,
934 p
= live_range_pool
.allocate ();
942 /* Create a new live range for OBJECT and queue it at the head of its
945 ira_add_live_range_to_object (ira_object_t object
, int start
, int finish
)
948 p
= ira_create_live_range (object
, start
, finish
,
949 OBJECT_LIVE_RANGES (object
));
950 OBJECT_LIVE_RANGES (object
) = p
;
953 /* Copy allocno live range R and return the result. */
955 copy_live_range (live_range_t r
)
959 p
= live_range_pool
.allocate ();
964 /* Copy allocno live range list given by its head R and return the
967 ira_copy_live_range_list (live_range_t r
)
969 live_range_t p
, first
, last
;
973 for (first
= last
= NULL
; r
!= NULL
; r
= r
->next
)
975 p
= copy_live_range (r
);
985 /* Merge ranges R1 and R2 and returns the result. The function
986 maintains the order of ranges and tries to minimize number of the
989 ira_merge_live_ranges (live_range_t r1
, live_range_t r2
)
991 live_range_t first
, last
;
997 for (first
= last
= NULL
; r1
!= NULL
&& r2
!= NULL
;)
999 if (r1
->start
< r2
->start
)
1001 if (r1
->start
<= r2
->finish
+ 1)
1003 /* Intersected ranges: merge r1 and r2 into r1. */
1004 r1
->start
= r2
->start
;
1005 if (r1
->finish
< r2
->finish
)
1006 r1
->finish
= r2
->finish
;
1007 live_range_t temp
= r2
;
1009 ira_finish_live_range (temp
);
1012 /* To try to merge with subsequent ranges in r1. */
1019 /* Add r1 to the result. */
1030 /* To try to merge with subsequent ranges in r2. */
1042 ira_assert (r1
->next
== NULL
);
1044 else if (r2
!= NULL
)
1050 ira_assert (r2
->next
== NULL
);
1054 ira_assert (last
->next
== NULL
);
1059 /* Return TRUE if live ranges R1 and R2 intersect. */
1061 ira_live_ranges_intersect_p (live_range_t r1
, live_range_t r2
)
1063 /* Remember the live ranges are always kept ordered. */
1064 while (r1
!= NULL
&& r2
!= NULL
)
1066 if (r1
->start
> r2
->finish
)
1068 else if (r2
->start
> r1
->finish
)
1076 /* Free allocno live range R. */
1078 ira_finish_live_range (live_range_t r
)
1080 live_range_pool
.remove (r
);
1083 /* Free list of allocno live ranges starting with R. */
1085 ira_finish_live_range_list (live_range_t r
)
1087 live_range_t next_r
;
1089 for (; r
!= NULL
; r
= next_r
)
1092 ira_finish_live_range (r
);
1096 /* Free updated register costs of allocno A. */
1098 ira_free_allocno_updated_costs (ira_allocno_t a
)
1100 enum reg_class aclass
;
1102 aclass
= ALLOCNO_CLASS (a
);
1103 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) != NULL
)
1104 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a
), aclass
);
1105 ALLOCNO_UPDATED_HARD_REG_COSTS (a
) = NULL
;
1106 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) != NULL
)
1107 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
),
1109 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
1112 /* Free and nullify all cost vectors allocated earlier for allocno
1115 ira_free_allocno_costs (ira_allocno_t a
)
1117 enum reg_class aclass
= ALLOCNO_CLASS (a
);
1119 ira_allocno_object_iterator oi
;
1121 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
1123 ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj
));
1124 ira_object_id_map
[OBJECT_CONFLICT_ID (obj
)] = NULL
;
1125 if (OBJECT_CONFLICT_ARRAY (obj
) != NULL
)
1126 ira_free (OBJECT_CONFLICT_ARRAY (obj
));
1127 object_pool
.remove (obj
);
1130 ira_allocnos
[ALLOCNO_NUM (a
)] = NULL
;
1131 if (ALLOCNO_HARD_REG_COSTS (a
) != NULL
)
1132 ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a
), aclass
);
1133 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) != NULL
)
1134 ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
), aclass
);
1135 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) != NULL
)
1136 ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a
), aclass
);
1137 if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) != NULL
)
1138 ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
),
1140 ALLOCNO_HARD_REG_COSTS (a
) = NULL
;
1141 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
1142 ALLOCNO_UPDATED_HARD_REG_COSTS (a
) = NULL
;
1143 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) = NULL
;
1146 /* Free the memory allocated for allocno A. */
1148 finish_allocno (ira_allocno_t a
)
1150 ira_free_allocno_costs (a
);
1151 allocno_pool
.remove (a
);
1154 /* Free the memory allocated for all allocnos. */
1156 finish_allocnos (void)
1159 ira_allocno_iterator ai
;
1161 FOR_EACH_ALLOCNO (a
, ai
)
1163 ira_free (ira_regno_allocno_map
);
1164 ira_object_id_map_vec
.release ();
1165 allocno_vec
.release ();
1166 allocno_pool
.release ();
1167 object_pool
.release ();
1168 live_range_pool
.release ();
1173 /* Pools for allocno preferences. */
1174 static object_allocator
<ira_allocno_pref
> pref_pool ("prefs", 100);
1176 /* Vec containing references to all created preferences. It is a
1177 container of array ira_prefs. */
1178 static vec
<ira_pref_t
> pref_vec
;
1180 /* The function initializes data concerning allocno prefs. */
1182 initiate_prefs (void)
1184 pref_vec
.create (get_max_uid ());
1189 /* Return pref for A and HARD_REGNO if any. */
1191 find_allocno_pref (ira_allocno_t a
, int hard_regno
)
1195 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= pref
->next_pref
)
1196 if (pref
->allocno
== a
&& pref
->hard_regno
== hard_regno
)
1201 /* Create and return pref with given attributes A, HARD_REGNO, and FREQ. */
1203 ira_create_pref (ira_allocno_t a
, int hard_regno
, int freq
)
1207 pref
= pref_pool
.allocate ();
1208 pref
->num
= ira_prefs_num
;
1210 pref
->hard_regno
= hard_regno
;
1212 pref_vec
.safe_push (pref
);
1213 ira_prefs
= pref_vec
.address ();
1214 ira_prefs_num
= pref_vec
.length ();
1218 /* Attach a pref PREF to the corresponding allocno. */
1220 add_allocno_pref_to_list (ira_pref_t pref
)
1222 ira_allocno_t a
= pref
->allocno
;
1224 pref
->next_pref
= ALLOCNO_PREFS (a
);
1225 ALLOCNO_PREFS (a
) = pref
;
1228 /* Create (or update frequency if the pref already exists) the pref of
1229 allocnos A preferring HARD_REGNO with frequency FREQ. */
1231 ira_add_allocno_pref (ira_allocno_t a
, int hard_regno
, int freq
)
1237 if ((pref
= find_allocno_pref (a
, hard_regno
)) != NULL
)
1242 pref
= ira_create_pref (a
, hard_regno
, freq
);
1243 ira_assert (a
!= NULL
);
1244 add_allocno_pref_to_list (pref
);
1247 /* Print info about PREF into file F. */
1249 print_pref (FILE *f
, ira_pref_t pref
)
1251 fprintf (f
, " pref%d:a%d(r%d)<-hr%d@%d\n", pref
->num
,
1252 ALLOCNO_NUM (pref
->allocno
), ALLOCNO_REGNO (pref
->allocno
),
1253 pref
->hard_regno
, pref
->freq
);
1256 /* Print info about PREF into stderr. */
1258 ira_debug_pref (ira_pref_t pref
)
1260 print_pref (stderr
, pref
);
1263 /* Print info about all prefs into file F. */
1265 print_prefs (FILE *f
)
1268 ira_pref_iterator pi
;
1270 FOR_EACH_PREF (pref
, pi
)
1271 print_pref (f
, pref
);
1274 /* Print info about all prefs into stderr. */
1276 ira_debug_prefs (void)
1278 print_prefs (stderr
);
1281 /* Print info about prefs involving allocno A into file F. */
1283 print_allocno_prefs (FILE *f
, ira_allocno_t a
)
1287 fprintf (f
, " a%d(r%d):", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
1288 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= pref
->next_pref
)
1289 fprintf (f
, " pref%d:hr%d@%d", pref
->num
, pref
->hard_regno
, pref
->freq
);
1293 /* Print info about prefs involving allocno A into stderr. */
1295 ira_debug_allocno_prefs (ira_allocno_t a
)
1297 print_allocno_prefs (stderr
, a
);
1300 /* The function frees memory allocated for PREF. */
1302 finish_pref (ira_pref_t pref
)
1304 ira_prefs
[pref
->num
] = NULL
;
1305 pref_pool
.remove (pref
);
1308 /* Remove PREF from the list of allocno prefs and free memory for
1311 ira_remove_pref (ira_pref_t pref
)
1313 ira_pref_t cpref
, prev
;
1315 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
1316 fprintf (ira_dump_file
, " Removing pref%d:hr%d@%d\n",
1317 pref
->num
, pref
->hard_regno
, pref
->freq
);
1318 for (prev
= NULL
, cpref
= ALLOCNO_PREFS (pref
->allocno
);
1320 prev
= cpref
, cpref
= cpref
->next_pref
)
1323 ira_assert (cpref
!= NULL
);
1325 ALLOCNO_PREFS (pref
->allocno
) = pref
->next_pref
;
1327 prev
->next_pref
= pref
->next_pref
;
1331 /* Remove all prefs of allocno A. */
1333 ira_remove_allocno_prefs (ira_allocno_t a
)
1335 ira_pref_t pref
, next_pref
;
1337 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= next_pref
)
1339 next_pref
= pref
->next_pref
;
1342 ALLOCNO_PREFS (a
) = NULL
;
1345 /* Free memory allocated for all prefs. */
1350 ira_pref_iterator pi
;
1352 FOR_EACH_PREF (pref
, pi
)
1354 pref_vec
.release ();
1355 pref_pool
.release ();
1360 /* Pools for copies. */
1361 static object_allocator
<ira_allocno_copy
> copy_pool ("copies", 100);
1363 /* Vec containing references to all created copies. It is a
1364 container of array ira_copies. */
1365 static vec
<ira_copy_t
> copy_vec
;
1367 /* The function initializes data concerning allocno copies. */
1369 initiate_copies (void)
1371 copy_vec
.create (get_max_uid ());
1376 /* Return copy connecting A1 and A2 and originated from INSN of
1377 LOOP_TREE_NODE if any. */
1379 find_allocno_copy (ira_allocno_t a1
, ira_allocno_t a2
, rtx_insn
*insn
,
1380 ira_loop_tree_node_t loop_tree_node
)
1382 ira_copy_t cp
, next_cp
;
1383 ira_allocno_t another_a
;
1385 for (cp
= ALLOCNO_COPIES (a1
); cp
!= NULL
; cp
= next_cp
)
1387 if (cp
->first
== a1
)
1389 next_cp
= cp
->next_first_allocno_copy
;
1390 another_a
= cp
->second
;
1392 else if (cp
->second
== a1
)
1394 next_cp
= cp
->next_second_allocno_copy
;
1395 another_a
= cp
->first
;
1399 if (another_a
== a2
&& cp
->insn
== insn
1400 && cp
->loop_tree_node
== loop_tree_node
)
1406 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1407 SECOND, FREQ, CONSTRAINT_P, and INSN. */
1409 ira_create_copy (ira_allocno_t first
, ira_allocno_t second
, int freq
,
1410 bool constraint_p
, rtx_insn
*insn
,
1411 ira_loop_tree_node_t loop_tree_node
)
1415 cp
= copy_pool
.allocate ();
1416 cp
->num
= ira_copies_num
;
1418 cp
->second
= second
;
1420 cp
->constraint_p
= constraint_p
;
1422 cp
->loop_tree_node
= loop_tree_node
;
1423 copy_vec
.safe_push (cp
);
1424 ira_copies
= copy_vec
.address ();
1425 ira_copies_num
= copy_vec
.length ();
1429 /* Attach a copy CP to allocnos involved into the copy. */
1431 add_allocno_copy_to_list (ira_copy_t cp
)
1433 ira_allocno_t first
= cp
->first
, second
= cp
->second
;
1435 cp
->prev_first_allocno_copy
= NULL
;
1436 cp
->prev_second_allocno_copy
= NULL
;
1437 cp
->next_first_allocno_copy
= ALLOCNO_COPIES (first
);
1438 if (cp
->next_first_allocno_copy
!= NULL
)
1440 if (cp
->next_first_allocno_copy
->first
== first
)
1441 cp
->next_first_allocno_copy
->prev_first_allocno_copy
= cp
;
1443 cp
->next_first_allocno_copy
->prev_second_allocno_copy
= cp
;
1445 cp
->next_second_allocno_copy
= ALLOCNO_COPIES (second
);
1446 if (cp
->next_second_allocno_copy
!= NULL
)
1448 if (cp
->next_second_allocno_copy
->second
== second
)
1449 cp
->next_second_allocno_copy
->prev_second_allocno_copy
= cp
;
1451 cp
->next_second_allocno_copy
->prev_first_allocno_copy
= cp
;
1453 ALLOCNO_COPIES (first
) = cp
;
1454 ALLOCNO_COPIES (second
) = cp
;
1457 /* Make a copy CP a canonical copy where number of the
1458 first allocno is less than the second one. */
1460 swap_allocno_copy_ends_if_necessary (ira_copy_t cp
)
1462 if (ALLOCNO_NUM (cp
->first
) <= ALLOCNO_NUM (cp
->second
))
1465 std::swap (cp
->first
, cp
->second
);
1466 std::swap (cp
->prev_first_allocno_copy
, cp
->prev_second_allocno_copy
);
1467 std::swap (cp
->next_first_allocno_copy
, cp
->next_second_allocno_copy
);
1470 /* Create (or update frequency if the copy already exists) and return
1471 the copy of allocnos FIRST and SECOND with frequency FREQ
1472 corresponding to move insn INSN (if any) and originated from
1475 ira_add_allocno_copy (ira_allocno_t first
, ira_allocno_t second
, int freq
,
1476 bool constraint_p
, rtx_insn
*insn
,
1477 ira_loop_tree_node_t loop_tree_node
)
1481 if ((cp
= find_allocno_copy (first
, second
, insn
, loop_tree_node
)) != NULL
)
1486 cp
= ira_create_copy (first
, second
, freq
, constraint_p
, insn
,
1488 ira_assert (first
!= NULL
&& second
!= NULL
);
1489 add_allocno_copy_to_list (cp
);
1490 swap_allocno_copy_ends_if_necessary (cp
);
1494 /* Print info about copy CP into file F. */
1496 print_copy (FILE *f
, ira_copy_t cp
)
1498 fprintf (f
, " cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp
->num
,
1499 ALLOCNO_NUM (cp
->first
), ALLOCNO_REGNO (cp
->first
),
1500 ALLOCNO_NUM (cp
->second
), ALLOCNO_REGNO (cp
->second
), cp
->freq
,
1502 ? "move" : cp
->constraint_p
? "constraint" : "shuffle");
1506 debug (ira_allocno_copy
&ref
)
1508 print_copy (stderr
, &ref
);
1512 debug (ira_allocno_copy
*ptr
)
1517 fprintf (stderr
, "<nil>\n");
1520 /* Print info about copy CP into stderr. */
1522 ira_debug_copy (ira_copy_t cp
)
1524 print_copy (stderr
, cp
);
1527 /* Print info about all copies into file F. */
1529 print_copies (FILE *f
)
1532 ira_copy_iterator ci
;
1534 FOR_EACH_COPY (cp
, ci
)
1538 /* Print info about all copies into stderr. */
1540 ira_debug_copies (void)
1542 print_copies (stderr
);
1545 /* Print info about copies involving allocno A into file F. */
1547 print_allocno_copies (FILE *f
, ira_allocno_t a
)
1549 ira_allocno_t another_a
;
1550 ira_copy_t cp
, next_cp
;
1552 fprintf (f
, " a%d(r%d):", ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
1553 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
1557 next_cp
= cp
->next_first_allocno_copy
;
1558 another_a
= cp
->second
;
1560 else if (cp
->second
== a
)
1562 next_cp
= cp
->next_second_allocno_copy
;
1563 another_a
= cp
->first
;
1567 fprintf (f
, " cp%d:a%d(r%d)@%d", cp
->num
,
1568 ALLOCNO_NUM (another_a
), ALLOCNO_REGNO (another_a
), cp
->freq
);
1574 debug (ira_allocno
&ref
)
1576 print_allocno_copies (stderr
, &ref
);
1580 debug (ira_allocno
*ptr
)
1585 fprintf (stderr
, "<nil>\n");
1589 /* Print info about copies involving allocno A into stderr. */
1591 ira_debug_allocno_copies (ira_allocno_t a
)
1593 print_allocno_copies (stderr
, a
);
1596 /* The function frees memory allocated for copy CP. */
1598 finish_copy (ira_copy_t cp
)
1600 copy_pool
.remove (cp
);
1604 /* Free memory allocated for all copies. */
1606 finish_copies (void)
1609 ira_copy_iterator ci
;
1611 FOR_EACH_COPY (cp
, ci
)
1613 copy_vec
.release ();
1614 copy_pool
.release ();
1619 /* Pools for cost vectors. It is defined only for allocno classes. */
1620 static pool_allocator
*cost_vector_pool
[N_REG_CLASSES
];
1622 /* The function initiates work with hard register cost vectors. It
1623 creates allocation pool for each allocno class. */
1625 initiate_cost_vectors (void)
1628 enum reg_class aclass
;
1630 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
1632 aclass
= ira_allocno_classes
[i
];
1633 cost_vector_pool
[aclass
] = new pool_allocator
1634 ("cost vectors", 100,
1635 sizeof (int) * (ira_class_hard_regs_num
[aclass
]));
1639 /* Allocate and return a cost vector VEC for ACLASS. */
1641 ira_allocate_cost_vector (reg_class_t aclass
)
1643 return (int*) cost_vector_pool
[(int) aclass
]->allocate ();
1646 /* Free a cost vector VEC for ACLASS. */
1648 ira_free_cost_vector (int *vec
, reg_class_t aclass
)
1650 ira_assert (vec
!= NULL
);
1651 cost_vector_pool
[(int) aclass
]->remove (vec
);
1654 /* Finish work with hard register cost vectors. Release allocation
1655 pool for each allocno class. */
1657 finish_cost_vectors (void)
1660 enum reg_class aclass
;
1662 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
1664 aclass
= ira_allocno_classes
[i
];
1665 delete cost_vector_pool
[aclass
];
1671 /* Compute a post-ordering of the reverse control flow of the loop body
1672 designated by the children nodes of LOOP_NODE, whose body nodes in
1673 pre-order are input as LOOP_PREORDER. Return a VEC with a post-order
1674 of the reverse loop body.
1676 For the post-order of the reverse CFG, we visit the basic blocks in
1677 LOOP_PREORDER array in the reverse order of where they appear.
1678 This is important: We do not just want to compute a post-order of
1679 the reverse CFG, we want to make a best-guess for a visiting order that
1680 minimizes the number of chain elements per allocno live range. If the
1681 blocks would be visited in a different order, we would still compute a
1682 correct post-ordering but it would be less likely that two nodes
1683 connected by an edge in the CFG are neighbours in the topsort. */
1685 static vec
<ira_loop_tree_node_t
>
1686 ira_loop_tree_body_rev_postorder (ira_loop_tree_node_t loop_node ATTRIBUTE_UNUSED
,
1687 vec
<ira_loop_tree_node_t
> loop_preorder
)
1689 vec
<ira_loop_tree_node_t
> topsort_nodes
= vNULL
;
1690 unsigned int n_loop_preorder
;
1692 n_loop_preorder
= loop_preorder
.length ();
1693 if (n_loop_preorder
!= 0)
1695 ira_loop_tree_node_t subloop_node
;
1697 auto_vec
<ira_loop_tree_node_t
> dfs_stack
;
1699 /* This is a bit of strange abuse of the BB_VISITED flag: We use
1700 the flag to mark blocks we still have to visit to add them to
1701 our post-order. Define an alias to avoid confusion. */
1702 #define BB_TO_VISIT BB_VISITED
1704 FOR_EACH_VEC_ELT (loop_preorder
, i
, subloop_node
)
1706 gcc_checking_assert (! (subloop_node
->bb
->flags
& BB_TO_VISIT
));
1707 subloop_node
->bb
->flags
|= BB_TO_VISIT
;
1710 topsort_nodes
.create (n_loop_preorder
);
1711 dfs_stack
.create (n_loop_preorder
);
1713 FOR_EACH_VEC_ELT_REVERSE (loop_preorder
, i
, subloop_node
)
1715 if (! (subloop_node
->bb
->flags
& BB_TO_VISIT
))
1718 subloop_node
->bb
->flags
&= ~BB_TO_VISIT
;
1719 dfs_stack
.quick_push (subloop_node
);
1720 while (! dfs_stack
.is_empty ())
1725 ira_loop_tree_node_t n
= dfs_stack
.last ();
1726 FOR_EACH_EDGE (e
, ei
, n
->bb
->preds
)
1728 ira_loop_tree_node_t pred_node
;
1729 basic_block pred_bb
= e
->src
;
1731 if (e
->src
== ENTRY_BLOCK_PTR_FOR_FN (cfun
))
1734 pred_node
= IRA_BB_NODE_BY_INDEX (pred_bb
->index
);
1736 && (pred_node
->bb
->flags
& BB_TO_VISIT
))
1738 pred_node
->bb
->flags
&= ~BB_TO_VISIT
;
1739 dfs_stack
.quick_push (pred_node
);
1742 if (n
== dfs_stack
.last ())
1745 topsort_nodes
.quick_push (n
);
1753 gcc_assert (topsort_nodes
.length () == n_loop_preorder
);
1754 return topsort_nodes
;
1757 /* The current loop tree node and its regno allocno map. */
1758 ira_loop_tree_node_t ira_curr_loop_tree_node
;
1759 ira_allocno_t
*ira_curr_regno_allocno_map
;
1761 /* This recursive function traverses loop tree with root LOOP_NODE
1762 calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
1763 correspondingly in preorder and postorder. The function sets up
1764 IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP. If BB_P,
1765 basic block nodes of LOOP_NODE is also processed (before its
1768 If BB_P is set and POSTORDER_FUNC is given, the basic blocks in
1769 the loop are passed in the *reverse* post-order of the *reverse*
1770 CFG. This is only used by ira_create_allocno_live_ranges, which
1771 wants to visit basic blocks in this order to minimize the number
1772 of elements per live range chain.
1773 Note that the loop tree nodes are still visited in the normal,
1774 forward post-order of the loop tree. */
1777 ira_traverse_loop_tree (bool bb_p
, ira_loop_tree_node_t loop_node
,
1778 void (*preorder_func
) (ira_loop_tree_node_t
),
1779 void (*postorder_func
) (ira_loop_tree_node_t
))
1781 ira_loop_tree_node_t subloop_node
;
1783 ira_assert (loop_node
->bb
== NULL
);
1784 ira_curr_loop_tree_node
= loop_node
;
1785 ira_curr_regno_allocno_map
= ira_curr_loop_tree_node
->regno_allocno_map
;
1787 if (preorder_func
!= NULL
)
1788 (*preorder_func
) (loop_node
);
1792 auto_vec
<ira_loop_tree_node_t
> loop_preorder
;
1795 /* Add all nodes to the set of nodes to visit. The IRA loop tree
1796 is set up such that nodes in the loop body appear in a pre-order
1797 of their place in the CFG. */
1798 for (subloop_node
= loop_node
->children
;
1799 subloop_node
!= NULL
;
1800 subloop_node
= subloop_node
->next
)
1801 if (subloop_node
->bb
!= NULL
)
1802 loop_preorder
.safe_push (subloop_node
);
1804 if (preorder_func
!= NULL
)
1805 FOR_EACH_VEC_ELT (loop_preorder
, i
, subloop_node
)
1806 (*preorder_func
) (subloop_node
);
1808 if (postorder_func
!= NULL
)
1810 vec
<ira_loop_tree_node_t
> loop_rev_postorder
=
1811 ira_loop_tree_body_rev_postorder (loop_node
, loop_preorder
);
1812 FOR_EACH_VEC_ELT_REVERSE (loop_rev_postorder
, i
, subloop_node
)
1813 (*postorder_func
) (subloop_node
);
1814 loop_rev_postorder
.release ();
1818 for (subloop_node
= loop_node
->subloops
;
1819 subloop_node
!= NULL
;
1820 subloop_node
= subloop_node
->subloop_next
)
1822 ira_assert (subloop_node
->bb
== NULL
);
1823 ira_traverse_loop_tree (bb_p
, subloop_node
,
1824 preorder_func
, postorder_func
);
1827 ira_curr_loop_tree_node
= loop_node
;
1828 ira_curr_regno_allocno_map
= ira_curr_loop_tree_node
->regno_allocno_map
;
1830 if (postorder_func
!= NULL
)
1831 (*postorder_func
) (loop_node
);
1836 /* The basic block currently being processed. */
1837 static basic_block curr_bb
;
1839 /* This recursive function creates allocnos corresponding to
1840 pseudo-registers containing in X. True OUTPUT_P means that X is
1841 an lvalue. PARENT corresponds to the parent expression of X. */
1843 create_insn_allocnos (rtx x
, rtx outer
, bool output_p
)
1847 enum rtx_code code
= GET_CODE (x
);
1853 if ((regno
= REGNO (x
)) >= FIRST_PSEUDO_REGISTER
)
1857 if ((a
= ira_curr_regno_allocno_map
[regno
]) == NULL
)
1859 a
= ira_create_allocno (regno
, false, ira_curr_loop_tree_node
);
1860 if (outer
!= NULL
&& GET_CODE (outer
) == SUBREG
)
1862 machine_mode wmode
= GET_MODE (outer
);
1863 if (GET_MODE_SIZE (wmode
) > GET_MODE_SIZE (ALLOCNO_WMODE (a
)))
1864 ALLOCNO_WMODE (a
) = wmode
;
1868 ALLOCNO_NREFS (a
)++;
1869 ALLOCNO_FREQ (a
) += REG_FREQ_FROM_BB (curr_bb
);
1871 bitmap_set_bit (ira_curr_loop_tree_node
->modified_regnos
, regno
);
1875 else if (code
== SET
)
1877 create_insn_allocnos (SET_DEST (x
), NULL
, true);
1878 create_insn_allocnos (SET_SRC (x
), NULL
, false);
1881 else if (code
== CLOBBER
)
1883 create_insn_allocnos (XEXP (x
, 0), NULL
, true);
1886 else if (code
== MEM
)
1888 create_insn_allocnos (XEXP (x
, 0), NULL
, false);
1891 else if (code
== PRE_DEC
|| code
== POST_DEC
|| code
== PRE_INC
||
1892 code
== POST_INC
|| code
== POST_MODIFY
|| code
== PRE_MODIFY
)
1894 create_insn_allocnos (XEXP (x
, 0), NULL
, true);
1895 create_insn_allocnos (XEXP (x
, 0), NULL
, false);
1899 fmt
= GET_RTX_FORMAT (code
);
1900 for (i
= GET_RTX_LENGTH (code
) - 1; i
>= 0; i
--)
1903 create_insn_allocnos (XEXP (x
, i
), x
, output_p
);
1904 else if (fmt
[i
] == 'E')
1905 for (j
= 0; j
< XVECLEN (x
, i
); j
++)
1906 create_insn_allocnos (XVECEXP (x
, i
, j
), x
, output_p
);
1910 /* Create allocnos corresponding to pseudo-registers living in the
1911 basic block represented by the corresponding loop tree node
1914 create_bb_allocnos (ira_loop_tree_node_t bb_node
)
1921 curr_bb
= bb
= bb_node
->bb
;
1922 ira_assert (bb
!= NULL
);
1923 FOR_BB_INSNS_REVERSE (bb
, insn
)
1924 if (NONDEBUG_INSN_P (insn
))
1925 create_insn_allocnos (PATTERN (insn
), NULL
, false);
1926 /* It might be a allocno living through from one subloop to
1928 EXECUTE_IF_SET_IN_REG_SET (df_get_live_in (bb
), FIRST_PSEUDO_REGISTER
, i
, bi
)
1929 if (ira_curr_regno_allocno_map
[i
] == NULL
)
1930 ira_create_allocno (i
, false, ira_curr_loop_tree_node
);
1933 /* Create allocnos corresponding to pseudo-registers living on edge E
1934 (a loop entry or exit). Also mark the allocnos as living on the
1937 create_loop_allocnos (edge e
)
1940 bitmap live_in_regs
, border_allocnos
;
1942 ira_loop_tree_node_t parent
;
1944 live_in_regs
= df_get_live_in (e
->dest
);
1945 border_allocnos
= ira_curr_loop_tree_node
->border_allocnos
;
1946 EXECUTE_IF_SET_IN_REG_SET (df_get_live_out (e
->src
),
1947 FIRST_PSEUDO_REGISTER
, i
, bi
)
1948 if (bitmap_bit_p (live_in_regs
, i
))
1950 if (ira_curr_regno_allocno_map
[i
] == NULL
)
1952 /* The order of creations is important for right
1953 ira_regno_allocno_map. */
1954 if ((parent
= ira_curr_loop_tree_node
->parent
) != NULL
1955 && parent
->regno_allocno_map
[i
] == NULL
)
1956 ira_create_allocno (i
, false, parent
);
1957 ira_create_allocno (i
, false, ira_curr_loop_tree_node
);
1959 bitmap_set_bit (border_allocnos
,
1960 ALLOCNO_NUM (ira_curr_regno_allocno_map
[i
]));
1964 /* Create allocnos corresponding to pseudo-registers living in loop
1965 represented by the corresponding loop tree node LOOP_NODE. This
1966 function is called by ira_traverse_loop_tree. */
1968 create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node
)
1970 if (loop_node
->bb
!= NULL
)
1971 create_bb_allocnos (loop_node
);
1972 else if (loop_node
!= ira_loop_tree_root
)
1979 ira_assert (current_loops
!= NULL
);
1980 FOR_EACH_EDGE (e
, ei
, loop_node
->loop
->header
->preds
)
1981 if (e
->src
!= loop_node
->loop
->latch
)
1982 create_loop_allocnos (e
);
1984 edges
= get_loop_exit_edges (loop_node
->loop
);
1985 FOR_EACH_VEC_ELT (edges
, i
, e
)
1986 create_loop_allocnos (e
);
1991 /* Propagate information about allocnos modified inside the loop given
1992 by its LOOP_TREE_NODE to its parent. */
1994 propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node
)
1996 if (loop_tree_node
== ira_loop_tree_root
)
1998 ira_assert (loop_tree_node
->bb
== NULL
);
1999 bitmap_ior_into (loop_tree_node
->parent
->modified_regnos
,
2000 loop_tree_node
->modified_regnos
);
2003 /* Propagate new info about allocno A (see comments about accumulated
2004 info in allocno definition) to the corresponding allocno on upper
2005 loop tree level. So allocnos on upper levels accumulate
2006 information about the corresponding allocnos in nested regions.
2007 The new info means allocno info finally calculated in this
2010 propagate_allocno_info (void)
2013 ira_allocno_t a
, parent_a
;
2014 ira_loop_tree_node_t parent
;
2015 enum reg_class aclass
;
2017 if (flag_ira_region
!= IRA_REGION_ALL
2018 && flag_ira_region
!= IRA_REGION_MIXED
)
2020 for (i
= max_reg_num () - 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
2021 for (a
= ira_regno_allocno_map
[i
];
2023 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
2024 if ((parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
) != NULL
2025 && (parent_a
= parent
->regno_allocno_map
[i
]) != NULL
2026 /* There are no caps yet at this point. So use
2027 border_allocnos to find allocnos for the propagation. */
2028 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a
)->border_allocnos
,
2031 if (! ALLOCNO_BAD_SPILL_P (a
))
2032 ALLOCNO_BAD_SPILL_P (parent_a
) = false;
2033 ALLOCNO_NREFS (parent_a
) += ALLOCNO_NREFS (a
);
2034 ALLOCNO_FREQ (parent_a
) += ALLOCNO_FREQ (a
);
2035 ALLOCNO_CALL_FREQ (parent_a
) += ALLOCNO_CALL_FREQ (a
);
2036 merge_hard_reg_conflicts (a
, parent_a
, true);
2037 ALLOCNO_CALLS_CROSSED_NUM (parent_a
)
2038 += ALLOCNO_CALLS_CROSSED_NUM (a
);
2039 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a
)
2040 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
2041 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a
),
2042 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
));
2043 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a
)
2044 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
2045 aclass
= ALLOCNO_CLASS (a
);
2046 ira_assert (aclass
== ALLOCNO_CLASS (parent_a
));
2047 ira_allocate_and_accumulate_costs
2048 (&ALLOCNO_HARD_REG_COSTS (parent_a
), aclass
,
2049 ALLOCNO_HARD_REG_COSTS (a
));
2050 ira_allocate_and_accumulate_costs
2051 (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a
),
2053 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
));
2054 ALLOCNO_CLASS_COST (parent_a
)
2055 += ALLOCNO_CLASS_COST (a
);
2056 ALLOCNO_MEMORY_COST (parent_a
) += ALLOCNO_MEMORY_COST (a
);
2060 /* Create allocnos corresponding to pseudo-registers in the current
2061 function. Traverse the loop tree for this. */
2063 create_allocnos (void)
2065 /* We need to process BB first to correctly link allocnos by member
2066 next_regno_allocno. */
2067 ira_traverse_loop_tree (true, ira_loop_tree_root
,
2068 create_loop_tree_node_allocnos
, NULL
);
2070 ira_traverse_loop_tree (false, ira_loop_tree_root
, NULL
,
2071 propagate_modified_regnos
);
2076 /* The page contains function to remove some regions from a separate
2077 register allocation. We remove regions whose separate allocation
2078 will hardly improve the result. As a result we speed up regional
2079 register allocation. */
2081 /* The function changes the object in range list given by R to OBJ. */
2083 change_object_in_range_list (live_range_t r
, ira_object_t obj
)
2085 for (; r
!= NULL
; r
= r
->next
)
2089 /* Move all live ranges associated with allocno FROM to allocno TO. */
2091 move_allocno_live_ranges (ira_allocno_t from
, ira_allocno_t to
)
2094 int n
= ALLOCNO_NUM_OBJECTS (from
);
2096 gcc_assert (n
== ALLOCNO_NUM_OBJECTS (to
));
2098 for (i
= 0; i
< n
; i
++)
2100 ira_object_t from_obj
= ALLOCNO_OBJECT (from
, i
);
2101 ira_object_t to_obj
= ALLOCNO_OBJECT (to
, i
);
2102 live_range_t lr
= OBJECT_LIVE_RANGES (from_obj
);
2104 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
2106 fprintf (ira_dump_file
,
2107 " Moving ranges of a%dr%d to a%dr%d: ",
2108 ALLOCNO_NUM (from
), ALLOCNO_REGNO (from
),
2109 ALLOCNO_NUM (to
), ALLOCNO_REGNO (to
));
2110 ira_print_live_range_list (ira_dump_file
, lr
);
2112 change_object_in_range_list (lr
, to_obj
);
2113 OBJECT_LIVE_RANGES (to_obj
)
2114 = ira_merge_live_ranges (lr
, OBJECT_LIVE_RANGES (to_obj
));
2115 OBJECT_LIVE_RANGES (from_obj
) = NULL
;
2120 copy_allocno_live_ranges (ira_allocno_t from
, ira_allocno_t to
)
2123 int n
= ALLOCNO_NUM_OBJECTS (from
);
2125 gcc_assert (n
== ALLOCNO_NUM_OBJECTS (to
));
2127 for (i
= 0; i
< n
; i
++)
2129 ira_object_t from_obj
= ALLOCNO_OBJECT (from
, i
);
2130 ira_object_t to_obj
= ALLOCNO_OBJECT (to
, i
);
2131 live_range_t lr
= OBJECT_LIVE_RANGES (from_obj
);
2133 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
2135 fprintf (ira_dump_file
, " Copying ranges of a%dr%d to a%dr%d: ",
2136 ALLOCNO_NUM (from
), ALLOCNO_REGNO (from
),
2137 ALLOCNO_NUM (to
), ALLOCNO_REGNO (to
));
2138 ira_print_live_range_list (ira_dump_file
, lr
);
2140 lr
= ira_copy_live_range_list (lr
);
2141 change_object_in_range_list (lr
, to_obj
);
2142 OBJECT_LIVE_RANGES (to_obj
)
2143 = ira_merge_live_ranges (lr
, OBJECT_LIVE_RANGES (to_obj
));
2147 /* Return TRUE if NODE represents a loop with low register
2150 low_pressure_loop_node_p (ira_loop_tree_node_t node
)
2153 enum reg_class pclass
;
2155 if (node
->bb
!= NULL
)
2158 for (i
= 0; i
< ira_pressure_classes_num
; i
++)
2160 pclass
= ira_pressure_classes
[i
];
2161 if (node
->reg_pressure
[pclass
] > ira_class_hard_regs_num
[pclass
]
2162 && ira_class_hard_regs_num
[pclass
] > 1)
2169 /* Return TRUE if LOOP has a complex enter or exit edge. We don't
2170 form a region from such loop if the target use stack register
2171 because reg-stack.c can not deal with such edges. */
2173 loop_with_complex_edge_p (struct loop
*loop
)
2181 FOR_EACH_EDGE (e
, ei
, loop
->header
->preds
)
2182 if (e
->flags
& EDGE_EH
)
2184 edges
= get_loop_exit_edges (loop
);
2186 FOR_EACH_VEC_ELT (edges
, i
, e
)
2187 if (e
->flags
& EDGE_COMPLEX
)
2197 /* Sort loops for marking them for removal. We put already marked
2198 loops first, then less frequent loops next, and then outer loops
2201 loop_compare_func (const void *v1p
, const void *v2p
)
2204 ira_loop_tree_node_t l1
= *(const ira_loop_tree_node_t
*) v1p
;
2205 ira_loop_tree_node_t l2
= *(const ira_loop_tree_node_t
*) v2p
;
2207 ira_assert (l1
->parent
!= NULL
&& l2
->parent
!= NULL
);
2208 if (l1
->to_remove_p
&& ! l2
->to_remove_p
)
2210 if (! l1
->to_remove_p
&& l2
->to_remove_p
)
2212 if ((diff
= l1
->loop
->header
->frequency
- l2
->loop
->header
->frequency
) != 0)
2214 if ((diff
= (int) loop_depth (l1
->loop
) - (int) loop_depth (l2
->loop
)) != 0)
2216 /* Make sorting stable. */
2217 return l1
->loop_num
- l2
->loop_num
;
2220 /* Mark loops which should be removed from regional allocation. We
2221 remove a loop with low register pressure inside another loop with
2222 register pressure. In this case a separate allocation of the loop
2223 hardly helps (for irregular register file architecture it could
2224 help by choosing a better hard register in the loop but we prefer
2225 faster allocation even in this case). We also remove cheap loops
2226 if there are more than IRA_MAX_LOOPS_NUM of them. Loop with EH
2227 exit or enter edges are removed too because the allocation might
2228 require put pseudo moves on the EH edges (we could still do this
2229 for pseudos with caller saved hard registers in some cases but it
2230 is impossible to say here or during top-down allocation pass what
2231 hard register the pseudos get finally). */
2233 mark_loops_for_removal (void)
2236 ira_loop_tree_node_t
*sorted_loops
;
2239 ira_assert (current_loops
!= NULL
);
2241 = (ira_loop_tree_node_t
*) ira_allocate (sizeof (ira_loop_tree_node_t
)
2242 * number_of_loops (cfun
));
2243 for (n
= i
= 0; vec_safe_iterate (get_loops (cfun
), i
, &loop
); i
++)
2244 if (ira_loop_nodes
[i
].regno_allocno_map
!= NULL
)
2246 if (ira_loop_nodes
[i
].parent
== NULL
)
2248 /* Don't remove the root. */
2249 ira_loop_nodes
[i
].to_remove_p
= false;
2252 sorted_loops
[n
++] = &ira_loop_nodes
[i
];
2253 ira_loop_nodes
[i
].to_remove_p
2254 = ((low_pressure_loop_node_p (ira_loop_nodes
[i
].parent
)
2255 && low_pressure_loop_node_p (&ira_loop_nodes
[i
]))
2257 || loop_with_complex_edge_p (ira_loop_nodes
[i
].loop
)
2261 qsort (sorted_loops
, n
, sizeof (ira_loop_tree_node_t
), loop_compare_func
);
2262 for (i
= 0; n
- i
+ 1 > IRA_MAX_LOOPS_NUM
; i
++)
2264 sorted_loops
[i
]->to_remove_p
= true;
2265 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2268 " Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
2269 sorted_loops
[i
]->loop_num
, sorted_loops
[i
]->loop
->header
->index
,
2270 sorted_loops
[i
]->loop
->header
->frequency
,
2271 loop_depth (sorted_loops
[i
]->loop
),
2272 low_pressure_loop_node_p (sorted_loops
[i
]->parent
)
2273 && low_pressure_loop_node_p (sorted_loops
[i
])
2274 ? "low pressure" : "cheap loop");
2276 ira_free (sorted_loops
);
2279 /* Mark all loops but root for removing. */
2281 mark_all_loops_for_removal (void)
2286 ira_assert (current_loops
!= NULL
);
2287 FOR_EACH_VEC_SAFE_ELT (get_loops (cfun
), i
, loop
)
2288 if (ira_loop_nodes
[i
].regno_allocno_map
!= NULL
)
2290 if (ira_loop_nodes
[i
].parent
== NULL
)
2292 /* Don't remove the root. */
2293 ira_loop_nodes
[i
].to_remove_p
= false;
2296 ira_loop_nodes
[i
].to_remove_p
= true;
2297 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2300 " Mark loop %d (header %d, freq %d, depth %d) for removal\n",
2301 ira_loop_nodes
[i
].loop_num
,
2302 ira_loop_nodes
[i
].loop
->header
->index
,
2303 ira_loop_nodes
[i
].loop
->header
->frequency
,
2304 loop_depth (ira_loop_nodes
[i
].loop
));
2308 /* Definition of vector of loop tree nodes. */
2310 /* Vec containing references to all removed loop tree nodes. */
2311 static vec
<ira_loop_tree_node_t
> removed_loop_vec
;
2313 /* Vec containing references to all children of loop tree nodes. */
2314 static vec
<ira_loop_tree_node_t
> children_vec
;
2316 /* Remove subregions of NODE if their separate allocation will not
2317 improve the result. */
2319 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node
)
2323 ira_loop_tree_node_t subnode
;
2325 remove_p
= node
->to_remove_p
;
2327 children_vec
.safe_push (node
);
2328 start
= children_vec
.length ();
2329 for (subnode
= node
->children
; subnode
!= NULL
; subnode
= subnode
->next
)
2330 if (subnode
->bb
== NULL
)
2331 remove_uneccesary_loop_nodes_from_loop_tree (subnode
);
2333 children_vec
.safe_push (subnode
);
2334 node
->children
= node
->subloops
= NULL
;
2337 removed_loop_vec
.safe_push (node
);
2340 while (children_vec
.length () > start
)
2342 subnode
= children_vec
.pop ();
2343 subnode
->parent
= node
;
2344 subnode
->next
= node
->children
;
2345 node
->children
= subnode
;
2346 if (subnode
->bb
== NULL
)
2348 subnode
->subloop_next
= node
->subloops
;
2349 node
->subloops
= subnode
;
2354 /* Return TRUE if NODE is inside PARENT. */
2356 loop_is_inside_p (ira_loop_tree_node_t node
, ira_loop_tree_node_t parent
)
2358 for (node
= node
->parent
; node
!= NULL
; node
= node
->parent
)
2364 /* Sort allocnos according to their order in regno allocno list. */
2366 regno_allocno_order_compare_func (const void *v1p
, const void *v2p
)
2368 ira_allocno_t a1
= *(const ira_allocno_t
*) v1p
;
2369 ira_allocno_t a2
= *(const ira_allocno_t
*) v2p
;
2370 ira_loop_tree_node_t n1
= ALLOCNO_LOOP_TREE_NODE (a1
);
2371 ira_loop_tree_node_t n2
= ALLOCNO_LOOP_TREE_NODE (a2
);
2373 if (loop_is_inside_p (n1
, n2
))
2375 else if (loop_is_inside_p (n2
, n1
))
2377 /* If allocnos are equally good, sort by allocno numbers, so that
2378 the results of qsort leave nothing to chance. We put allocnos
2379 with higher number first in the list because it is the original
2380 order for allocnos from loops on the same levels. */
2381 return ALLOCNO_NUM (a2
) - ALLOCNO_NUM (a1
);
2384 /* This array is used to sort allocnos to restore allocno order in
2385 the regno allocno list. */
2386 static ira_allocno_t
*regno_allocnos
;
2388 /* Restore allocno order for REGNO in the regno allocno list. */
2390 ira_rebuild_regno_allocno_list (int regno
)
2395 for (n
= 0, a
= ira_regno_allocno_map
[regno
];
2397 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
2398 regno_allocnos
[n
++] = a
;
2400 qsort (regno_allocnos
, n
, sizeof (ira_allocno_t
),
2401 regno_allocno_order_compare_func
);
2402 for (i
= 1; i
< n
; i
++)
2403 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos
[i
- 1]) = regno_allocnos
[i
];
2404 ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos
[n
- 1]) = NULL
;
2405 ira_regno_allocno_map
[regno
] = regno_allocnos
[0];
2406 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2407 fprintf (ira_dump_file
, " Rebuilding regno allocno list for %d\n", regno
);
2410 /* Propagate info from allocno FROM_A to allocno A. */
2412 propagate_some_info_from_allocno (ira_allocno_t a
, ira_allocno_t from_a
)
2414 enum reg_class aclass
;
2416 merge_hard_reg_conflicts (from_a
, a
, false);
2417 ALLOCNO_NREFS (a
) += ALLOCNO_NREFS (from_a
);
2418 ALLOCNO_FREQ (a
) += ALLOCNO_FREQ (from_a
);
2419 ALLOCNO_CALL_FREQ (a
) += ALLOCNO_CALL_FREQ (from_a
);
2420 ALLOCNO_CALLS_CROSSED_NUM (a
) += ALLOCNO_CALLS_CROSSED_NUM (from_a
);
2421 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
)
2422 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (from_a
);
2423 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
),
2424 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (from_a
));
2426 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
)
2427 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a
);
2428 if (! ALLOCNO_BAD_SPILL_P (from_a
))
2429 ALLOCNO_BAD_SPILL_P (a
) = false;
2430 aclass
= ALLOCNO_CLASS (from_a
);
2431 ira_assert (aclass
== ALLOCNO_CLASS (a
));
2432 ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a
), aclass
,
2433 ALLOCNO_HARD_REG_COSTS (from_a
));
2434 ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a
),
2436 ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a
));
2437 ALLOCNO_CLASS_COST (a
) += ALLOCNO_CLASS_COST (from_a
);
2438 ALLOCNO_MEMORY_COST (a
) += ALLOCNO_MEMORY_COST (from_a
);
2441 /* Remove allocnos from loops removed from the allocation
2444 remove_unnecessary_allocnos (void)
2447 bool merged_p
, rebuild_p
;
2448 ira_allocno_t a
, prev_a
, next_a
, parent_a
;
2449 ira_loop_tree_node_t a_node
, parent
;
2452 regno_allocnos
= NULL
;
2453 for (regno
= max_reg_num () - 1; regno
>= FIRST_PSEUDO_REGISTER
; regno
--)
2456 for (prev_a
= NULL
, a
= ira_regno_allocno_map
[regno
];
2460 next_a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
);
2461 a_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2462 if (! a_node
->to_remove_p
)
2466 for (parent
= a_node
->parent
;
2467 (parent_a
= parent
->regno_allocno_map
[regno
]) == NULL
2468 && parent
->to_remove_p
;
2469 parent
= parent
->parent
)
2471 if (parent_a
== NULL
)
2473 /* There are no allocnos with the same regno in
2474 upper region -- just move the allocno to the
2477 ALLOCNO_LOOP_TREE_NODE (a
) = parent
;
2478 parent
->regno_allocno_map
[regno
] = a
;
2479 bitmap_set_bit (parent
->all_allocnos
, ALLOCNO_NUM (a
));
2484 /* Remove the allocno and update info of allocno in
2485 the upper region. */
2487 ira_regno_allocno_map
[regno
] = next_a
;
2489 ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a
) = next_a
;
2490 move_allocno_live_ranges (a
, parent_a
);
2492 propagate_some_info_from_allocno (parent_a
, a
);
2493 /* Remove it from the corresponding regno allocno
2494 map to avoid info propagation of subsequent
2495 allocno into this already removed allocno. */
2496 a_node
->regno_allocno_map
[regno
] = NULL
;
2497 ira_remove_allocno_prefs (a
);
2503 /* We need to restore the order in regno allocno list. */
2505 if (regno_allocnos
== NULL
)
2507 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
2508 * ira_allocnos_num
);
2509 ira_rebuild_regno_allocno_list (regno
);
2513 ira_rebuild_start_finish_chains ();
2514 if (regno_allocnos
!= NULL
)
2515 ira_free (regno_allocnos
);
2518 /* Remove allocnos from all loops but the root. */
2520 remove_low_level_allocnos (void)
2523 bool merged_p
, propagate_p
;
2524 ira_allocno_t a
, top_a
;
2525 ira_loop_tree_node_t a_node
, parent
;
2526 ira_allocno_iterator ai
;
2529 FOR_EACH_ALLOCNO (a
, ai
)
2531 a_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2532 if (a_node
== ira_loop_tree_root
|| ALLOCNO_CAP_MEMBER (a
) != NULL
)
2534 regno
= ALLOCNO_REGNO (a
);
2535 if ((top_a
= ira_loop_tree_root
->regno_allocno_map
[regno
]) == NULL
)
2537 ALLOCNO_LOOP_TREE_NODE (a
) = ira_loop_tree_root
;
2538 ira_loop_tree_root
->regno_allocno_map
[regno
] = a
;
2541 propagate_p
= a_node
->parent
->regno_allocno_map
[regno
] == NULL
;
2542 /* Remove the allocno and update info of allocno in the upper
2544 move_allocno_live_ranges (a
, top_a
);
2547 propagate_some_info_from_allocno (top_a
, a
);
2549 FOR_EACH_ALLOCNO (a
, ai
)
2551 a_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2552 if (a_node
== ira_loop_tree_root
)
2554 parent
= a_node
->parent
;
2555 regno
= ALLOCNO_REGNO (a
);
2556 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
2557 ira_assert (ALLOCNO_CAP (a
) != NULL
);
2558 else if (ALLOCNO_CAP (a
) == NULL
)
2559 ira_assert (parent
->regno_allocno_map
[regno
] != NULL
);
2561 FOR_EACH_ALLOCNO (a
, ai
)
2563 regno
= ALLOCNO_REGNO (a
);
2564 if (ira_loop_tree_root
->regno_allocno_map
[regno
] == a
)
2567 ira_allocno_object_iterator oi
;
2569 ira_regno_allocno_map
[regno
] = a
;
2570 ALLOCNO_NEXT_REGNO_ALLOCNO (a
) = NULL
;
2571 ALLOCNO_CAP_MEMBER (a
) = NULL
;
2572 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
2573 COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj
),
2574 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
2576 if (ALLOCNO_TOTAL_NO_STACK_REG_P (a
))
2577 ALLOCNO_NO_STACK_REG_P (a
) = true;
2582 ira_remove_allocno_prefs (a
);
2587 ira_rebuild_start_finish_chains ();
2590 /* Remove loops from consideration. We remove all loops except for
2591 root if ALL_P or loops for which a separate allocation will not
2592 improve the result. We have to do this after allocno creation and
2593 their costs and allocno class evaluation because only after that
2594 the register pressure can be known and is calculated. */
2596 remove_unnecessary_regions (bool all_p
)
2598 if (current_loops
== NULL
)
2601 mark_all_loops_for_removal ();
2603 mark_loops_for_removal ();
2604 children_vec
.create (last_basic_block_for_fn (cfun
)
2605 + number_of_loops (cfun
));
2606 removed_loop_vec
.create (last_basic_block_for_fn (cfun
)
2607 + number_of_loops (cfun
));
2608 remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root
);
2609 children_vec
.release ();
2611 remove_low_level_allocnos ();
2613 remove_unnecessary_allocnos ();
2614 while (removed_loop_vec
.length () > 0)
2615 finish_loop_tree_node (removed_loop_vec
.pop ());
2616 removed_loop_vec
.release ();
2621 /* At this point true value of allocno attribute bad_spill_p means
2622 that there is an insn where allocno occurs and where the allocno
2623 can not be used as memory. The function updates the attribute, now
2624 it can be true only for allocnos which can not be used as memory in
2625 an insn and in whose live ranges there is other allocno deaths.
2626 Spilling allocnos with true value will not improve the code because
2627 it will not make other allocnos colorable and additional reloads
2628 for the corresponding pseudo will be generated in reload pass for
2629 each insn it occurs.
2631 This is a trick mentioned in one classic article of Chaitin etc
2632 which is frequently omitted in other implementations of RA based on
2635 update_bad_spill_attribute (void)
2639 ira_allocno_iterator ai
;
2640 ira_allocno_object_iterator aoi
;
2643 enum reg_class aclass
;
2644 bitmap_head dead_points
[N_REG_CLASSES
];
2646 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
2648 aclass
= ira_allocno_classes
[i
];
2649 bitmap_initialize (&dead_points
[aclass
], ®_obstack
);
2651 FOR_EACH_ALLOCNO (a
, ai
)
2653 aclass
= ALLOCNO_CLASS (a
);
2654 if (aclass
== NO_REGS
)
2656 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, aoi
)
2657 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
2658 bitmap_set_bit (&dead_points
[aclass
], r
->finish
);
2660 FOR_EACH_ALLOCNO (a
, ai
)
2662 aclass
= ALLOCNO_CLASS (a
);
2663 if (aclass
== NO_REGS
)
2665 if (! ALLOCNO_BAD_SPILL_P (a
))
2667 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, aoi
)
2669 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
2671 for (i
= r
->start
+ 1; i
< r
->finish
; i
++)
2672 if (bitmap_bit_p (&dead_points
[aclass
], i
))
2679 ALLOCNO_BAD_SPILL_P (a
) = false;
2684 for (i
= 0; i
< ira_allocno_classes_num
; i
++)
2686 aclass
= ira_allocno_classes
[i
];
2687 bitmap_clear (&dead_points
[aclass
]);
2693 /* Set up minimal and maximal live range points for allocnos. */
2695 setup_min_max_allocno_live_range_point (void)
2698 ira_allocno_t a
, parent_a
, cap
;
2699 ira_allocno_iterator ai
;
2700 #ifdef ENABLE_IRA_CHECKING
2701 ira_object_iterator oi
;
2705 ira_loop_tree_node_t parent
;
2707 FOR_EACH_ALLOCNO (a
, ai
)
2709 int n
= ALLOCNO_NUM_OBJECTS (a
);
2711 for (i
= 0; i
< n
; i
++)
2713 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
2714 r
= OBJECT_LIVE_RANGES (obj
);
2717 OBJECT_MAX (obj
) = r
->finish
;
2718 for (; r
->next
!= NULL
; r
= r
->next
)
2720 OBJECT_MIN (obj
) = r
->start
;
2723 for (i
= max_reg_num () - 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
2724 for (a
= ira_regno_allocno_map
[i
];
2726 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
2729 int n
= ALLOCNO_NUM_OBJECTS (a
);
2731 for (j
= 0; j
< n
; j
++)
2733 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
2734 ira_object_t parent_obj
;
2736 if (OBJECT_MAX (obj
) < 0)
2738 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
2739 /* Accumulation of range info. */
2740 if (ALLOCNO_CAP (a
) != NULL
)
2742 for (cap
= ALLOCNO_CAP (a
); cap
!= NULL
; cap
= ALLOCNO_CAP (cap
))
2744 ira_object_t cap_obj
= ALLOCNO_OBJECT (cap
, j
);
2745 if (OBJECT_MAX (cap_obj
) < OBJECT_MAX (obj
))
2746 OBJECT_MAX (cap_obj
) = OBJECT_MAX (obj
);
2747 if (OBJECT_MIN (cap_obj
) > OBJECT_MIN (obj
))
2748 OBJECT_MIN (cap_obj
) = OBJECT_MIN (obj
);
2752 if ((parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
) == NULL
)
2754 parent_a
= parent
->regno_allocno_map
[i
];
2755 parent_obj
= ALLOCNO_OBJECT (parent_a
, j
);
2756 if (OBJECT_MAX (parent_obj
) < OBJECT_MAX (obj
))
2757 OBJECT_MAX (parent_obj
) = OBJECT_MAX (obj
);
2758 if (OBJECT_MIN (parent_obj
) > OBJECT_MIN (obj
))
2759 OBJECT_MIN (parent_obj
) = OBJECT_MIN (obj
);
2762 #ifdef ENABLE_IRA_CHECKING
2763 FOR_EACH_OBJECT (obj
, oi
)
2765 if ((0 <= OBJECT_MIN (obj
) && OBJECT_MIN (obj
) <= ira_max_point
)
2766 && (0 <= OBJECT_MAX (obj
) && OBJECT_MAX (obj
) <= ira_max_point
))
2773 /* Sort allocnos according to their live ranges. Allocnos with
2774 smaller allocno class are put first unless we use priority
2775 coloring. Allocnos with the same class are ordered according
2776 their start (min). Allocnos with the same start are ordered
2777 according their finish (max). */
2779 object_range_compare_func (const void *v1p
, const void *v2p
)
2782 ira_object_t obj1
= *(const ira_object_t
*) v1p
;
2783 ira_object_t obj2
= *(const ira_object_t
*) v2p
;
2784 ira_allocno_t a1
= OBJECT_ALLOCNO (obj1
);
2785 ira_allocno_t a2
= OBJECT_ALLOCNO (obj2
);
2787 if ((diff
= OBJECT_MIN (obj1
) - OBJECT_MIN (obj2
)) != 0)
2789 if ((diff
= OBJECT_MAX (obj1
) - OBJECT_MAX (obj2
)) != 0)
2791 return ALLOCNO_NUM (a1
) - ALLOCNO_NUM (a2
);
2794 /* Sort ira_object_id_map and set up conflict id of allocnos. */
2796 sort_conflict_id_map (void)
2800 ira_allocno_iterator ai
;
2803 FOR_EACH_ALLOCNO (a
, ai
)
2805 ira_allocno_object_iterator oi
;
2808 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
2809 ira_object_id_map
[num
++] = obj
;
2812 qsort (ira_object_id_map
, num
, sizeof (ira_object_t
),
2813 object_range_compare_func
);
2814 for (i
= 0; i
< num
; i
++)
2816 ira_object_t obj
= ira_object_id_map
[i
];
2818 gcc_assert (obj
!= NULL
);
2819 OBJECT_CONFLICT_ID (obj
) = i
;
2821 for (i
= num
; i
< ira_objects_num
; i
++)
2822 ira_object_id_map
[i
] = NULL
;
2825 /* Set up minimal and maximal conflict ids of allocnos with which
2826 given allocno can conflict. */
2828 setup_min_max_conflict_allocno_ids (void)
2831 int i
, j
, min
, max
, start
, finish
, first_not_finished
, filled_area_start
;
2832 int *live_range_min
, *last_lived
;
2833 int word0_min
, word0_max
;
2835 ira_allocno_iterator ai
;
2837 live_range_min
= (int *) ira_allocate (sizeof (int) * ira_objects_num
);
2839 first_not_finished
= -1;
2840 for (i
= 0; i
< ira_objects_num
; i
++)
2842 ira_object_t obj
= ira_object_id_map
[i
];
2847 a
= OBJECT_ALLOCNO (obj
);
2851 aclass
= ALLOCNO_CLASS (a
);
2853 first_not_finished
= i
;
2857 start
= OBJECT_MIN (obj
);
2858 /* If we skip an allocno, the allocno with smaller ids will
2859 be also skipped because of the secondary sorting the
2860 range finishes (see function
2861 object_range_compare_func). */
2862 while (first_not_finished
< i
2863 && start
> OBJECT_MAX (ira_object_id_map
2864 [first_not_finished
]))
2865 first_not_finished
++;
2866 min
= first_not_finished
;
2869 /* We could increase min further in this case but it is good
2872 live_range_min
[i
] = OBJECT_MIN (obj
);
2873 OBJECT_MIN (obj
) = min
;
2875 last_lived
= (int *) ira_allocate (sizeof (int) * ira_max_point
);
2877 filled_area_start
= -1;
2878 for (i
= ira_objects_num
- 1; i
>= 0; i
--)
2880 ira_object_t obj
= ira_object_id_map
[i
];
2885 a
= OBJECT_ALLOCNO (obj
);
2888 aclass
= ALLOCNO_CLASS (a
);
2889 for (j
= 0; j
< ira_max_point
; j
++)
2891 filled_area_start
= ira_max_point
;
2893 min
= live_range_min
[i
];
2894 finish
= OBJECT_MAX (obj
);
2895 max
= last_lived
[finish
];
2897 /* We could decrease max further in this case but it is good
2899 max
= OBJECT_CONFLICT_ID (obj
) - 1;
2900 OBJECT_MAX (obj
) = max
;
2901 /* In filling, we can go further A range finish to recognize
2902 intersection quickly because if the finish of subsequently
2903 processed allocno (it has smaller conflict id) range is
2904 further A range finish than they are definitely intersected
2905 (the reason for this is the allocnos with bigger conflict id
2906 have their range starts not smaller than allocnos with
2908 for (j
= min
; j
< filled_area_start
; j
++)
2910 filled_area_start
= min
;
2912 ira_free (last_lived
);
2913 ira_free (live_range_min
);
2915 /* For allocnos with more than one object, we may later record extra conflicts in
2916 subobject 0 that we cannot really know about here.
2917 For now, simply widen the min/max range of these subobjects. */
2919 word0_min
= INT_MAX
;
2920 word0_max
= INT_MIN
;
2922 FOR_EACH_ALLOCNO (a
, ai
)
2924 int n
= ALLOCNO_NUM_OBJECTS (a
);
2929 obj0
= ALLOCNO_OBJECT (a
, 0);
2930 if (OBJECT_CONFLICT_ID (obj0
) < word0_min
)
2931 word0_min
= OBJECT_CONFLICT_ID (obj0
);
2932 if (OBJECT_CONFLICT_ID (obj0
) > word0_max
)
2933 word0_max
= OBJECT_CONFLICT_ID (obj0
);
2935 FOR_EACH_ALLOCNO (a
, ai
)
2937 int n
= ALLOCNO_NUM_OBJECTS (a
);
2942 obj0
= ALLOCNO_OBJECT (a
, 0);
2943 if (OBJECT_MIN (obj0
) > word0_min
)
2944 OBJECT_MIN (obj0
) = word0_min
;
2945 if (OBJECT_MAX (obj0
) < word0_max
)
2946 OBJECT_MAX (obj0
) = word0_max
;
2956 ira_allocno_iterator ai
;
2957 ira_loop_tree_node_t loop_tree_node
;
2959 FOR_EACH_ALLOCNO (a
, ai
)
2961 if (ALLOCNO_LOOP_TREE_NODE (a
) == ira_loop_tree_root
)
2963 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
2964 create_cap_allocno (a
);
2965 else if (ALLOCNO_CAP (a
) == NULL
)
2967 loop_tree_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2968 if (!bitmap_bit_p (loop_tree_node
->border_allocnos
, ALLOCNO_NUM (a
)))
2969 create_cap_allocno (a
);
2976 /* The page contains code transforming more one region internal
2977 representation (IR) to one region IR which is necessary for reload.
2978 This transformation is called IR flattening. We might just rebuild
2979 the IR for one region but we don't do it because it takes a lot of
2982 /* Map: regno -> allocnos which will finally represent the regno for
2983 IR with one region. */
2984 static ira_allocno_t
*regno_top_level_allocno_map
;
2986 /* Find the allocno that corresponds to A at a level one higher up in the
2987 loop tree. Returns NULL if A is a cap, or if it has no parent. */
2989 ira_parent_allocno (ira_allocno_t a
)
2991 ira_loop_tree_node_t parent
;
2993 if (ALLOCNO_CAP (a
) != NULL
)
2996 parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
;
3000 return parent
->regno_allocno_map
[ALLOCNO_REGNO (a
)];
3003 /* Find the allocno that corresponds to A at a level one higher up in the
3004 loop tree. If ALLOCNO_CAP is set for A, return that. */
3006 ira_parent_or_cap_allocno (ira_allocno_t a
)
3008 if (ALLOCNO_CAP (a
) != NULL
)
3009 return ALLOCNO_CAP (a
);
3011 return ira_parent_allocno (a
);
3014 /* Process all allocnos originated from pseudo REGNO and copy live
3015 ranges, hard reg conflicts, and allocno stack reg attributes from
3016 low level allocnos to final allocnos which are destinations of
3017 removed stores at a loop exit. Return true if we copied live
3020 copy_info_to_removed_store_destinations (int regno
)
3023 ira_allocno_t parent_a
= NULL
;
3024 ira_loop_tree_node_t parent
;
3028 for (a
= ira_regno_allocno_map
[regno
];
3030 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
3032 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))])
3033 /* This allocno will be removed. */
3036 /* Caps will be removed. */
3037 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
3038 for (parent
= ALLOCNO_LOOP_TREE_NODE (a
)->parent
;
3040 parent
= parent
->parent
)
3041 if ((parent_a
= parent
->regno_allocno_map
[regno
]) == NULL
3043 == regno_top_level_allocno_map
[REGNO
3044 (allocno_emit_reg (parent_a
))]
3045 && ALLOCNO_EMIT_DATA (parent_a
)->mem_optimized_dest_p
))
3047 if (parent
== NULL
|| parent_a
== NULL
)
3050 copy_allocno_live_ranges (a
, parent_a
);
3051 merge_hard_reg_conflicts (a
, parent_a
, true);
3053 ALLOCNO_CALL_FREQ (parent_a
) += ALLOCNO_CALL_FREQ (a
);
3054 ALLOCNO_CALLS_CROSSED_NUM (parent_a
)
3055 += ALLOCNO_CALLS_CROSSED_NUM (a
);
3056 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a
)
3057 += ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
3058 IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (parent_a
),
3059 ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a
));
3060 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a
)
3061 += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
3067 /* Flatten the IR. In other words, this function transforms IR as if
3068 it were built with one region (without loops). We could make it
3069 much simpler by rebuilding IR with one region, but unfortunately it
3070 takes a lot of time. MAX_REGNO_BEFORE_EMIT and
3071 IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
3072 IRA_MAX_POINT before emitting insns on the loop borders. */
3074 ira_flattening (int max_regno_before_emit
, int ira_max_point_before_emit
)
3079 bool new_pseudos_p
, merged_p
, mem_dest_p
;
3081 enum reg_class aclass
;
3082 ira_allocno_t a
, parent_a
, first
, second
, node_first
, node_second
;
3084 ira_loop_tree_node_t node
;
3086 ira_allocno_iterator ai
;
3087 ira_copy_iterator ci
;
3089 regno_top_level_allocno_map
3090 = (ira_allocno_t
*) ira_allocate (max_reg_num ()
3091 * sizeof (ira_allocno_t
));
3092 memset (regno_top_level_allocno_map
, 0,
3093 max_reg_num () * sizeof (ira_allocno_t
));
3094 new_pseudos_p
= merged_p
= false;
3095 FOR_EACH_ALLOCNO (a
, ai
)
3097 ira_allocno_object_iterator oi
;
3100 if (ALLOCNO_CAP_MEMBER (a
) != NULL
)
3101 /* Caps are not in the regno allocno maps and they are never
3102 will be transformed into allocnos existing after IR
3105 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
3106 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
3107 OBJECT_CONFLICT_HARD_REGS (obj
));
3109 ALLOCNO_TOTAL_NO_STACK_REG_P (a
) = ALLOCNO_NO_STACK_REG_P (a
);
3112 /* Fix final allocno attributes. */
3113 for (i
= max_regno_before_emit
- 1; i
>= FIRST_PSEUDO_REGISTER
; i
--)
3116 for (a
= ira_regno_allocno_map
[i
];
3118 a
= ALLOCNO_NEXT_REGNO_ALLOCNO (a
))
3120 ira_emit_data_t parent_data
, data
= ALLOCNO_EMIT_DATA (a
);
3122 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
3123 if (data
->somewhere_renamed_p
)
3124 new_pseudos_p
= true;
3125 parent_a
= ira_parent_allocno (a
);
3126 if (parent_a
== NULL
)
3128 ALLOCNO_COPIES (a
) = NULL
;
3129 regno_top_level_allocno_map
[REGNO (data
->reg
)] = a
;
3132 ira_assert (ALLOCNO_CAP_MEMBER (parent_a
) == NULL
);
3134 if (data
->mem_optimized_dest
!= NULL
)
3136 parent_data
= ALLOCNO_EMIT_DATA (parent_a
);
3137 if (REGNO (data
->reg
) == REGNO (parent_data
->reg
))
3139 merge_hard_reg_conflicts (a
, parent_a
, true);
3140 move_allocno_live_ranges (a
, parent_a
);
3142 parent_data
->mem_optimized_dest_p
3143 = (parent_data
->mem_optimized_dest_p
3144 || data
->mem_optimized_dest_p
);
3147 new_pseudos_p
= true;
3150 ALLOCNO_NREFS (parent_a
) -= ALLOCNO_NREFS (a
);
3151 ALLOCNO_FREQ (parent_a
) -= ALLOCNO_FREQ (a
);
3152 ALLOCNO_CALL_FREQ (parent_a
) -= ALLOCNO_CALL_FREQ (a
);
3153 ALLOCNO_CALLS_CROSSED_NUM (parent_a
)
3154 -= ALLOCNO_CALLS_CROSSED_NUM (a
);
3155 ALLOCNO_CHEAP_CALLS_CROSSED_NUM (parent_a
)
3156 -= ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a
);
3157 ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a
)
3158 -= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
3159 ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a
) >= 0
3160 && ALLOCNO_NREFS (parent_a
) >= 0
3161 && ALLOCNO_FREQ (parent_a
) >= 0);
3162 aclass
= ALLOCNO_CLASS (parent_a
);
3163 hard_regs_num
= ira_class_hard_regs_num
[aclass
];
3164 if (ALLOCNO_HARD_REG_COSTS (a
) != NULL
3165 && ALLOCNO_HARD_REG_COSTS (parent_a
) != NULL
)
3166 for (j
= 0; j
< hard_regs_num
; j
++)
3167 ALLOCNO_HARD_REG_COSTS (parent_a
)[j
]
3168 -= ALLOCNO_HARD_REG_COSTS (a
)[j
];
3169 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) != NULL
3170 && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a
) != NULL
)
3171 for (j
= 0; j
< hard_regs_num
; j
++)
3172 ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a
)[j
]
3173 -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a
)[j
];
3174 ALLOCNO_CLASS_COST (parent_a
)
3175 -= ALLOCNO_CLASS_COST (a
);
3176 ALLOCNO_MEMORY_COST (parent_a
) -= ALLOCNO_MEMORY_COST (a
);
3177 parent_a
= ira_parent_allocno (parent_a
);
3178 if (parent_a
== NULL
)
3181 ALLOCNO_COPIES (a
) = NULL
;
3182 regno_top_level_allocno_map
[REGNO (data
->reg
)] = a
;
3184 if (mem_dest_p
&& copy_info_to_removed_store_destinations (i
))
3187 ira_assert (new_pseudos_p
|| ira_max_point_before_emit
== ira_max_point
);
3188 if (merged_p
|| ira_max_point_before_emit
!= ira_max_point
)
3189 ira_rebuild_start_finish_chains ();
3192 sparseset objects_live
;
3194 /* Rebuild conflicts. */
3195 FOR_EACH_ALLOCNO (a
, ai
)
3197 ira_allocno_object_iterator oi
;
3200 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))]
3201 || ALLOCNO_CAP_MEMBER (a
) != NULL
)
3203 FOR_EACH_ALLOCNO_OBJECT (a
, obj
, oi
)
3205 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
3206 ira_assert (r
->object
== obj
);
3207 clear_conflicts (obj
);
3210 objects_live
= sparseset_alloc (ira_objects_num
);
3211 for (i
= 0; i
< ira_max_point
; i
++)
3213 for (r
= ira_start_point_ranges
[i
]; r
!= NULL
; r
= r
->start_next
)
3215 ira_object_t obj
= r
->object
;
3217 a
= OBJECT_ALLOCNO (obj
);
3218 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))]
3219 || ALLOCNO_CAP_MEMBER (a
) != NULL
)
3222 aclass
= ALLOCNO_CLASS (a
);
3223 EXECUTE_IF_SET_IN_SPARSESET (objects_live
, n
)
3225 ira_object_t live_obj
= ira_object_id_map
[n
];
3226 ira_allocno_t live_a
= OBJECT_ALLOCNO (live_obj
);
3227 enum reg_class live_aclass
= ALLOCNO_CLASS (live_a
);
3229 if (ira_reg_classes_intersect_p
[aclass
][live_aclass
]
3230 /* Don't set up conflict for the allocno with itself. */
3232 ira_add_conflict (obj
, live_obj
);
3234 sparseset_set_bit (objects_live
, OBJECT_CONFLICT_ID (obj
));
3237 for (r
= ira_finish_point_ranges
[i
]; r
!= NULL
; r
= r
->finish_next
)
3238 sparseset_clear_bit (objects_live
, OBJECT_CONFLICT_ID (r
->object
));
3240 sparseset_free (objects_live
);
3241 compress_conflict_vecs ();
3243 /* Mark some copies for removing and change allocnos in the rest
3245 FOR_EACH_COPY (cp
, ci
)
3247 if (ALLOCNO_CAP_MEMBER (cp
->first
) != NULL
3248 || ALLOCNO_CAP_MEMBER (cp
->second
) != NULL
)
3250 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
3252 (ira_dump_file
, " Remove cp%d:%c%dr%d-%c%dr%d\n",
3253 cp
->num
, ALLOCNO_CAP_MEMBER (cp
->first
) != NULL
? 'c' : 'a',
3254 ALLOCNO_NUM (cp
->first
),
3255 REGNO (allocno_emit_reg (cp
->first
)),
3256 ALLOCNO_CAP_MEMBER (cp
->second
) != NULL
? 'c' : 'a',
3257 ALLOCNO_NUM (cp
->second
),
3258 REGNO (allocno_emit_reg (cp
->second
)));
3259 cp
->loop_tree_node
= NULL
;
3263 = regno_top_level_allocno_map
[REGNO (allocno_emit_reg (cp
->first
))];
3265 = regno_top_level_allocno_map
[REGNO (allocno_emit_reg (cp
->second
))];
3266 node
= cp
->loop_tree_node
;
3268 keep_p
= true; /* It copy generated in ira-emit.c. */
3271 /* Check that the copy was not propagated from level on
3272 which we will have different pseudos. */
3273 node_first
= node
->regno_allocno_map
[ALLOCNO_REGNO (cp
->first
)];
3274 node_second
= node
->regno_allocno_map
[ALLOCNO_REGNO (cp
->second
)];
3275 keep_p
= ((REGNO (allocno_emit_reg (first
))
3276 == REGNO (allocno_emit_reg (node_first
)))
3277 && (REGNO (allocno_emit_reg (second
))
3278 == REGNO (allocno_emit_reg (node_second
))));
3282 cp
->loop_tree_node
= ira_loop_tree_root
;
3284 cp
->second
= second
;
3288 cp
->loop_tree_node
= NULL
;
3289 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
3290 fprintf (ira_dump_file
, " Remove cp%d:a%dr%d-a%dr%d\n",
3291 cp
->num
, ALLOCNO_NUM (cp
->first
),
3292 REGNO (allocno_emit_reg (cp
->first
)),
3293 ALLOCNO_NUM (cp
->second
),
3294 REGNO (allocno_emit_reg (cp
->second
)));
3297 /* Remove unnecessary allocnos on lower levels of the loop tree. */
3298 FOR_EACH_ALLOCNO (a
, ai
)
3300 if (a
!= regno_top_level_allocno_map
[REGNO (allocno_emit_reg (a
))]
3301 || ALLOCNO_CAP_MEMBER (a
) != NULL
)
3303 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
3304 fprintf (ira_dump_file
, " Remove a%dr%d\n",
3305 ALLOCNO_NUM (a
), REGNO (allocno_emit_reg (a
)));
3306 ira_remove_allocno_prefs (a
);
3310 ALLOCNO_LOOP_TREE_NODE (a
) = ira_loop_tree_root
;
3311 ALLOCNO_REGNO (a
) = REGNO (allocno_emit_reg (a
));
3312 ALLOCNO_CAP (a
) = NULL
;
3313 /* Restore updated costs for assignments from reload. */
3314 ALLOCNO_UPDATED_MEMORY_COST (a
) = ALLOCNO_MEMORY_COST (a
);
3315 ALLOCNO_UPDATED_CLASS_COST (a
) = ALLOCNO_CLASS_COST (a
);
3316 if (! ALLOCNO_ASSIGNED_P (a
))
3317 ira_free_allocno_updated_costs (a
);
3318 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
);
3319 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) == NULL
);
3321 /* Remove unnecessary copies. */
3322 FOR_EACH_COPY (cp
, ci
)
3324 if (cp
->loop_tree_node
== NULL
)
3326 ira_copies
[cp
->num
] = NULL
;
3331 (ALLOCNO_LOOP_TREE_NODE (cp
->first
) == ira_loop_tree_root
3332 && ALLOCNO_LOOP_TREE_NODE (cp
->second
) == ira_loop_tree_root
);
3333 add_allocno_copy_to_list (cp
);
3334 swap_allocno_copy_ends_if_necessary (cp
);
3336 rebuild_regno_allocno_maps ();
3337 if (ira_max_point
!= ira_max_point_before_emit
)
3338 ira_compress_allocno_live_ranges ();
3339 ira_free (regno_top_level_allocno_map
);
#ifdef ENABLE_IRA_CHECKING
/* Check creation of all allocnos.  Allocnos on lower levels should
   have allocnos or caps on all upper levels.  */
static void
check_allocno_creation (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_loop_tree_node_t loop_tree_node;

  FOR_EACH_ALLOCNO (a, ai)
    {
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
				ALLOCNO_NUM (a)));
      if (loop_tree_node == ira_loop_tree_root)
	continue;
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
	ira_assert (loop_tree_node->parent
		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
		    && bitmap_bit_p (loop_tree_node->border_allocnos,
				     ALLOCNO_NUM (a)));
    }
}
#endif
3372 /* Identify allocnos which prefer a register class with a single hard register.
3373 Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3374 less likely to use the preferred singleton register. */
3376 update_conflict_hard_reg_costs (void)
3379 ira_allocno_iterator ai
;
3382 FOR_EACH_ALLOCNO (a
, ai
)
3384 reg_class_t aclass
= ALLOCNO_CLASS (a
);
3385 reg_class_t pref
= reg_preferred_class (ALLOCNO_REGNO (a
));
3386 int singleton
= ira_class_singleton
[pref
][ALLOCNO_MODE (a
)];
3389 index
= ira_class_hard_reg_index
[(int) aclass
][singleton
];
3392 if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a
) == NULL
3393 || ALLOCNO_HARD_REG_COSTS (a
) == NULL
)
3396 for (i
= ira_class_hard_regs_num
[(int) aclass
] - 1; i
>= 0; i
--)
3397 if (ALLOCNO_HARD_REG_COSTS (a
)[i
] > ALLOCNO_CLASS_COST (a
)
3398 && min
> ALLOCNO_HARD_REG_COSTS (a
)[i
])
3399 min
= ALLOCNO_HARD_REG_COSTS (a
)[i
];
3402 ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a
),
3404 ALLOCNO_CONFLICT_HARD_REG_COSTS (a
)[index
]
3405 -= min
- ALLOCNO_CLASS_COST (a
);
3409 /* Create a internal representation (IR) for IRA (allocnos, copies,
3410 loop tree nodes). The function returns TRUE if we generate loop
3411 structure (besides nodes representing all function and the basic
3412 blocks) for regional allocation. A true return means that we
3413 really need to flatten IR before the reload. */
3420 initiate_cost_vectors ();
3421 initiate_allocnos ();
3424 create_loop_tree_nodes ();
3428 create_allocno_objects ();
3429 ira_create_allocno_live_ranges ();
3430 remove_unnecessary_regions (false);
3431 ira_compress_allocno_live_ranges ();
3432 update_bad_spill_attribute ();
3433 loops_p
= more_one_region_p ();
3436 propagate_allocno_info ();
3439 ira_tune_allocno_costs ();
3440 #ifdef ENABLE_IRA_CHECKING
3441 check_allocno_creation ();
3443 setup_min_max_allocno_live_range_point ();
3444 sort_conflict_id_map ();
3445 setup_min_max_conflict_allocno_ids ();
3446 ira_build_conflicts ();
3447 update_conflict_hard_reg_costs ();
3448 if (! ira_conflicts_p
)
3451 ira_allocno_iterator ai
;
3453 /* Remove all regions but root one. */
3456 remove_unnecessary_regions (true);
3459 /* We don't save hard registers around calls for fast allocation
3460 -- add caller clobbered registers as conflicting ones to
3461 allocno crossing calls. */
3462 FOR_EACH_ALLOCNO (a
, ai
)
3463 if (ALLOCNO_CALLS_CROSSED_NUM (a
) != 0)
3464 ior_hard_reg_conflicts (a
, &call_used_reg_set
);
3466 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
3467 print_copies (ira_dump_file
);
3468 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
3469 print_prefs (ira_dump_file
);
3470 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
3475 ira_allocno_iterator ai
;
3480 FOR_EACH_ALLOCNO (a
, ai
)
3482 int j
, nobj
= ALLOCNO_NUM_OBJECTS (a
);
3486 for (j
= 0; j
< nobj
; j
++)
3488 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
3489 n
+= OBJECT_NUM_CONFLICTS (obj
);
3490 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
3494 fprintf (ira_dump_file
, " regions=%d, blocks=%d, points=%d\n",
3495 current_loops
== NULL
? 1 : number_of_loops (cfun
),
3496 n_basic_blocks_for_fn (cfun
), ira_max_point
);
3497 fprintf (ira_dump_file
,
3498 " allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
3499 ira_allocnos_num
, nr_big
, ira_copies_num
, n
, nr
);
3504 /* Release the data created by function ira_build. */
3508 finish_loop_tree_nodes ();
3512 finish_cost_vectors ();
3513 ira_finish_allocno_live_ranges ();