1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2013 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
32 #include "hash-table.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
36 #include "diagnostic-core.h"
/* NOTE(review): this whole region appears to have lines dropped during
   extraction (the embedded original line numbers skip, e.g. 50 -> 52 -> 54,
   and the struct bodies are missing their braces and several members).
   Verify against upstream GCC ira-color.c before relying on it.  */
/* Opaque pointer typedef for the hard-register-set descriptor below.  */
42 typedef struct allocno_hard_regs
*allocno_hard_regs_t
;
44 /* The structure contains information about hard registers can be
45 assigned to allocnos. Usually it is allocno profitable hard
46 registers but in some cases this set can be a bit different. Major
47 reason of the difference is a requirement to use hard register sets
48 that form a tree or a forest (set of trees), i.e. hard register set
49 of a node should contain hard register sets of its subnodes. */
50 struct allocno_hard_regs
52 /* Hard registers can be assigned to an allocno. */
54 /* Overall (spilling) cost of all allocnos with given register
59 typedef struct allocno_hard_regs_node
*allocno_hard_regs_node_t
;
61 /* A node representing allocno hard registers. Such nodes form a
62 forest (set of trees). Each subnode of given node in the forest
63 refers for hard register set (usually allocno profitable hard
64 register set) which is a subset of one referred from given
66 struct allocno_hard_regs_node
68 /* Set up number of the node in preorder traversing of the forest. */
70 /* Used for different calculation like finding conflict size of an
73 /* Used for calculation of conflict size of an allocno. The
74 conflict size of the allocno is maximal number of given allocno
75 hard registers needed for allocation of the conflicting allocnos.
76 Given allocno is trivially colored if this number plus the number
77 of hard registers needed for given allocno is not greater than
78 the number of given allocno hard register set. */
80 /* The number of hard registers given by member hard_regs. */
82 /* The following member is used to form the final forest. */
84 /* Pointer to the corresponding profitable hard registers. */
85 allocno_hard_regs_t hard_regs
;
86 /* Parent, first subnode, previous and next node with the same
87 parent in the forest. */
88 allocno_hard_regs_node_t parent
, first
, prev
, next
;
91 /* To decrease footprint of ira_allocno structure we store all data
92 needed only for coloring in the following structure. */
93 struct allocno_color_data
95 /* TRUE value means that the allocno was not removed yet from the
96 conflicting graph during colouring. */
97 unsigned int in_graph_p
: 1;
98 /* TRUE if it is put on the stack to make other allocnos
100 unsigned int may_be_spilled_p
: 1;
101 /* TRUE if the allocno is trivially colorable. */
102 unsigned int colorable_p
: 1;
103 /* Number of hard registers of the allocno class really
104 available for the allocno allocation. It is number of the
105 profitable hard regs. */
106 int available_regs_num
;
107 /* Allocnos in a bucket (used in coloring) chained by the following
109 ira_allocno_t next_bucket_allocno
;
110 ira_allocno_t prev_bucket_allocno
;
111 /* Used for temporary purposes. */
113 /* Used to exclude repeated processing. */
115 /* Profitable hard regs available for this pseudo allocation. It
116 means that the set excludes unavailable hard regs and hard regs
117 conflicting with given pseudo. They should be of the allocno
119 HARD_REG_SET profitable_hard_regs
;
120 /* The allocno hard registers node. */
121 allocno_hard_regs_node_t hard_regs_node
;
122 /* Array of structures allocno_hard_regs_subnode representing
123 given allocno hard registers node (the 1st element in the array)
124 and all its subnodes in the tree (forest) of allocno hard
125 register nodes (see comments above). */
126 int hard_regs_subnodes_start
;
127 /* The length of the previous array. */
128 int hard_regs_subnodes_num
;
132 typedef struct allocno_color_data
*allocno_color_data_t
;
134 /* Container for storing allocno data concerning coloring. */
135 static allocno_color_data_t allocno_color_data
;
137 /* Macro to access the data concerning coloring. */
138 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
140 /* Used for finding allocno colorability to exclude repeated allocno
141 processing and for updating preferencing to exclude repeated
142 allocno processing during assignment. */
143 static int curr_allocno_process
;
145 /* This file contains code for regional graph coloring, spill/restore
146 code placement optimization, and code helping the reload pass to do
149 /* Bitmap of allocnos which should be colored. */
150 static bitmap coloring_allocno_bitmap
;
152 /* Bitmap of allocnos which should be taken into account during
153 coloring. In general case it contains allocnos from
154 coloring_allocno_bitmap plus other already colored conflicting
156 static bitmap consideration_allocno_bitmap
;
158 /* All allocnos sorted according their priorities. */
159 static ira_allocno_t
*sorted_allocnos
;
161 /* Vec representing the stack of allocnos used during coloring. */
162 static vec
<ira_allocno_t
> allocno_stack_vec
;
164 /* Helper for qsort comparison callbacks - return a positive integer if
165 X > Y, or a negative value otherwise. Use a conditional expression
166 instead of a difference computation to insulate from possible overflow
167 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
168 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
/* NOTE(review): hash-table machinery for unique allocno hard-register sets.
   Lines are missing in this extraction (braces, return statements and parts
   of the add/finish bodies); verify against upstream GCC ira-color.c.  */
172 /* Definition of vector of allocno hard registers. */
174 /* Vector of unique allocno hard registers. */
175 static vec
<allocno_hard_regs_t
> allocno_hard_regs_vec
;
/* Hasher traits used by hash_table below; hashes/compares by the `set'
   member only (cost is ignored for identity).  */
177 struct allocno_hard_regs_hasher
: typed_noop_remove
<allocno_hard_regs
>
179 typedef allocno_hard_regs value_type
;
180 typedef allocno_hard_regs compare_type
;
181 static inline hashval_t
hash (const value_type
*);
182 static inline bool equal (const value_type
*, const compare_type
*);
185 /* Returns hash value for allocno hard registers V. */
187 allocno_hard_regs_hasher::hash (const value_type
*hv
)
189 return iterative_hash (&hv
->set
, sizeof (HARD_REG_SET
), 0);
192 /* Compares allocno hard registers V1 and V2. */
194 allocno_hard_regs_hasher::equal (const value_type
*hv1
, const compare_type
*hv2
)
196 return hard_reg_set_equal_p (hv1
->set
, hv2
->set
);
199 /* Hash table of unique allocno hard registers. */
200 static hash_table
<allocno_hard_regs_hasher
> allocno_hard_regs_htab
;
202 /* Return allocno hard registers in the hash table equal to HV. */
203 static allocno_hard_regs_t
204 find_hard_regs (allocno_hard_regs_t hv
)
206 return allocno_hard_regs_htab
.find (hv
);
209 /* Insert allocno hard registers HV in the hash table (if it is not
210 there yet) and return the value which in the table. */
211 static allocno_hard_regs_t
212 insert_hard_regs (allocno_hard_regs_t hv
)
214 allocno_hard_regs
**slot
= allocno_hard_regs_htab
.find_slot (hv
, INSERT
);
221 /* Initialize data concerning allocno hard registers. */
223 init_allocno_hard_regs (void)
225 allocno_hard_regs_vec
.create (200);
226 allocno_hard_regs_htab
.create (200);
229 /* Add (or update info about) allocno hard registers with SET and
231 static allocno_hard_regs_t
232 add_allocno_hard_regs (HARD_REG_SET set
, HOST_WIDEST_INT cost
)
234 struct allocno_hard_regs temp
;
235 allocno_hard_regs_t hv
;
/* An empty set is a caller bug; the forest code never stores one.  */
237 gcc_assert (! hard_reg_set_empty_p (set
));
238 COPY_HARD_REG_SET (temp
.set
, set
);
/* If an equal set already exists, reuse it (the then-branch, presumably
   accumulating COST, was dropped by the extraction — TODO confirm).  */
239 if ((hv
= find_hard_regs (&temp
)) != NULL
)
243 hv
= ((struct allocno_hard_regs
*)
244 ira_allocate (sizeof (struct allocno_hard_regs
)));
245 COPY_HARD_REG_SET (hv
->set
, set
);
247 allocno_hard_regs_vec
.safe_push (hv
);
248 insert_hard_regs (hv
);
253 /* Finalize data concerning allocno hard registers. */
255 finish_allocno_hard_regs (void)
258 allocno_hard_regs_t hv
;
261 allocno_hard_regs_vec
.iterate (i
, &hv
);
264 allocno_hard_regs_htab
.dispose ();
265 allocno_hard_regs_vec
.release ();
/* NOTE(review): qsort comparator plus forest file-scope state.  The
   comparator's return statements were dropped by the extraction; presumably
   it orders by decreasing cost with a tie-breaker — verify upstream.  */
268 /* Sort hard regs according to their frequency of usage. */
270 allocno_hard_regs_compare (const void *v1p
, const void *v2p
)
272 allocno_hard_regs_t hv1
= *(const allocno_hard_regs_t
*) v1p
;
273 allocno_hard_regs_t hv2
= *(const allocno_hard_regs_t
*) v2p
;
275 if (hv2
->cost
> hv1
->cost
)
277 else if (hv2
->cost
< hv1
->cost
)
285 /* Used for finding a common ancestor of two allocno hard registers
286 nodes in the forest. We use the current value of
287 'node_check_tick' to mark all nodes from one node to the top and
288 then walking up from another node until we find a marked node.
290 It is also used to figure out allocno colorability as a mark that
291 we already reset value of member 'conflict_size' for the forest
292 node corresponding to the processed allocno. */
293 static int node_check_tick
;
295 /* Roots of the forest containing hard register sets can be assigned
297 static allocno_hard_regs_node_t hard_regs_roots
;
299 /* Definition of vector of allocno hard register nodes. */
301 /* Vector used to create the forest. */
302 static vec
<allocno_hard_regs_node_t
> hard_regs_node_vec
;
/* NOTE(review): forest-construction helpers.  Several lines (braces, loop
   headers, a return) were dropped by the extraction; verify against
   upstream GCC ira-color.c before editing.  */
304 /* Create and return allocno hard registers node containing allocno
305 hard registers HV. */
306 static allocno_hard_regs_node_t
307 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv
)
309 allocno_hard_regs_node_t new_node
;
311 new_node
= ((struct allocno_hard_regs_node
*)
312 ira_allocate (sizeof (struct allocno_hard_regs_node
)));
314 new_node
->hard_regs
= hv
;
315 new_node
->hard_regs_num
= hard_reg_set_size (hv
->set
);
316 new_node
->first
= NULL
;
317 new_node
->used_p
= false;
321 /* Add allocno hard registers node NEW_NODE to the forest on its level
324 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t
*roots
,
325 allocno_hard_regs_node_t new_node
)
/* Standard doubly-linked-list push onto the front of the *ROOTS chain.  */
327 new_node
->next
= *roots
;
328 if (new_node
->next
!= NULL
)
329 new_node
->next
->prev
= new_node
;
330 new_node
->prev
= NULL
;
334 /* Add allocno hard registers HV (or its best approximation if it is
335 not possible) to the forest on its level given by ROOTS. */
337 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t
*roots
,
338 allocno_hard_regs_t hv
)
340 unsigned int i
, start
;
341 allocno_hard_regs_node_t node
, prev
, new_node
;
342 HARD_REG_SET temp_set
;
343 allocno_hard_regs_t hv2
;
/* Remember the vector length so we only look at nodes pushed during
   this (recursive) invocation; truncated back at the end.  */
345 start
= hard_regs_node_vec
.length ();
346 for (node
= *roots
; node
!= NULL
; node
= node
->next
)
348 if (hard_reg_set_equal_p (hv
->set
, node
->hard_regs
->set
))
/* HV is a subset of this node: recurse into its children.  */
350 if (hard_reg_set_subset_p (hv
->set
, node
->hard_regs
->set
))
352 add_allocno_hard_regs_to_forest (&node
->first
, hv
);
/* This node is a subset of HV: candidate child of a new node.  */
355 if (hard_reg_set_subset_p (node
->hard_regs
->set
, hv
->set
))
356 hard_regs_node_vec
.safe_push (node
);
/* Partial overlap: recurse with the intersection instead.  */
357 else if (hard_reg_set_intersect_p (hv
->set
, node
->hard_regs
->set
))
359 COPY_HARD_REG_SET (temp_set
, hv
->set
);
360 AND_HARD_REG_SET (temp_set
, node
->hard_regs
->set
);
361 hv2
= add_allocno_hard_regs (temp_set
, hv
->cost
);
362 add_allocno_hard_regs_to_forest (&node
->first
, hv2
);
365 if (hard_regs_node_vec
.length ()
368 /* Create a new node which contains nodes in hard_regs_node_vec. */
369 CLEAR_HARD_REG_SET (temp_set
);
371 i
< hard_regs_node_vec
.length ();
374 node
= hard_regs_node_vec
[i
];
375 IOR_HARD_REG_SET (temp_set
, node
->hard_regs
->set
);
377 hv
= add_allocno_hard_regs (temp_set
, hv
->cost
);
378 new_node
= create_new_allocno_hard_regs_node (hv
);
/* Detach each collected node from its sibling list and re-parent it
   under NEW_NODE.  */
381 i
< hard_regs_node_vec
.length ();
384 node
= hard_regs_node_vec
[i
];
385 if (node
->prev
== NULL
)
388 node
->prev
->next
= node
->next
;
389 if (node
->next
!= NULL
)
390 node
->next
->prev
= node
->prev
;
392 new_node
->first
= node
;
399 add_new_allocno_hard_regs_node_to_forest (roots
, new_node
);
401 hard_regs_node_vec
.truncate (start
);
/* NOTE(review): forest traversal helpers; extraction dropped braces and
   some statements (e.g. the SET parameter declaration of
   collect_allocno_hard_regs_cover) — verify against upstream.  */
404 /* Add allocno hard registers nodes starting with the forest level
405 given by FIRST which contains biggest set inside SET. */
407 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first
,
410 allocno_hard_regs_node_t node
;
412 ira_assert (first
!= NULL
);
413 for (node
= first
; node
!= NULL
; node
= node
->next
)
/* Node fully inside SET: it is part of the cover.  */
414 if (hard_reg_set_subset_p (node
->hard_regs
->set
, set
))
415 hard_regs_node_vec
.safe_push (node
);
/* Partial overlap: descend to find smaller covered subsets.  */
416 else if (hard_reg_set_intersect_p (set
, node
->hard_regs
->set
))
417 collect_allocno_hard_regs_cover (node
->first
, set
);
420 /* Set up field parent as PARENT in all allocno hard registers nodes
421 in forest given by FIRST. */
423 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first
,
424 allocno_hard_regs_node_t parent
)
426 allocno_hard_regs_node_t node
;
428 for (node
= first
; node
!= NULL
; node
= node
->next
)
430 node
->parent
= parent
;
431 setup_allocno_hard_regs_nodes_parent (node
->first
, node
);
435 /* Return allocno hard registers node which is a first common ancestor
436 node of FIRST and SECOND in the forest. */
437 static allocno_hard_regs_node_t
438 first_common_ancestor_node (allocno_hard_regs_node_t first
,
439 allocno_hard_regs_node_t second
)
441 allocno_hard_regs_node_t node
;
/* Mark the path FIRST..root with the current tick, then walk up from
   SECOND until a marked node is hit.  (The node_check_tick++ that should
   precede this loop was dropped by the extraction — TODO confirm.)  */
444 for (node
= first
; node
!= NULL
; node
= node
->parent
)
445 node
->check
= node_check_tick
;
446 for (node
= second
; node
!= NULL
; node
= node
->parent
)
447 if (node
->check
== node_check_tick
)
449 return first_common_ancestor_node (second
, first
);
/* NOTE(review): dump/debug helpers; purely diagnostic output to F/stderr.
   Range-compression logic in print_hard_reg_set is missing lines (the `i'
   and `start' declarations, some branches) — verify against upstream.  */
452 /* Print hard reg set SET to F. */
454 print_hard_reg_set (FILE *f
, HARD_REG_SET set
, bool new_line_p
)
458 for (start
= -1, i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
460 if (TEST_HARD_REG_BIT (set
, i
))
/* Start of a new run of consecutive registers.  */
462 if (i
== 0 || ! TEST_HARD_REG_BIT (set
, i
- 1))
466 && (i
== FIRST_PSEUDO_REGISTER
- 1 || ! TEST_HARD_REG_BIT (set
, i
)))
/* Single register, pair, or range, printed compactly.  */
469 fprintf (f
, " %d", start
);
470 else if (start
== i
- 2)
471 fprintf (f
, " %d %d", start
, start
+ 1);
473 fprintf (f
, " %d-%d", start
, i
- 1);
481 /* Print allocno hard register subforest given by ROOTS and its LEVEL
484 print_hard_regs_subforest (FILE *f
, allocno_hard_regs_node_t roots
,
488 allocno_hard_regs_node_t node
;
490 for (node
= roots
; node
!= NULL
; node
= node
->next
)
/* Indent two spaces per tree level.  */
493 for (i
= 0; i
< level
* 2; i
++)
495 fprintf (f
, "%d:(", node
->preorder_num
);
496 print_hard_reg_set (f
, node
->hard_regs
->set
, false);
497 fprintf (f
, ")@" HOST_WIDEST_INT_PRINT_DEC
"\n", node
->hard_regs
->cost
);
498 print_hard_regs_subforest (f
, node
->first
, level
+ 1);
502 /* Print the allocno hard register forest to F. */
504 print_hard_regs_forest (FILE *f
)
506 fprintf (f
, " Hard reg set forest:\n");
507 print_hard_regs_subforest (f
, hard_regs_roots
, 1);
510 /* Print the allocno hard register forest to stderr. */
512 ira_debug_hard_regs_forest (void)
514 print_hard_regs_forest (stderr
);
/* NOTE(review): this region lost many lines (original numbering jumps
   524 -> 529 -> 534 -> 541 -> 543 -> 563); the splice logic that replaces an
   unused node with its children is only partially visible.  Verify against
   upstream GCC ira-color.c.  */
517 /* Remove unused allocno hard registers nodes from forest given by its
520 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t
*roots
)
522 allocno_hard_regs_node_t node
, prev
, next
, last
;
524 for (prev
= NULL
, node
= *roots
; node
!= NULL
; node
= next
)
529 remove_unused_allocno_hard_regs_nodes (&node
->first
);
/* Find the last child, presumably to splice the child list into the
   current sibling list in place of NODE — TODO confirm.  */
534 for (last
= node
->first
;
535 last
!= NULL
&& last
->next
!= NULL
;
541 *roots
= node
->first
;
543 prev
->next
= node
->first
;
563 /* Set up fields preorder_num starting with START_NUM in all allocno
564 hard registers nodes in forest given by FIRST. Return biggest set
565 PREORDER_NUM increased by 1. */
567 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first
,
568 allocno_hard_regs_node_t parent
,
571 allocno_hard_regs_node_t node
;
573 for (node
= first
; node
!= NULL
; node
= node
->next
)
575 node
->preorder_num
= start_num
++;
576 node
->parent
= parent
;
577 start_num
= enumerate_allocno_hard_regs_nodes (node
->first
, node
,
583 /* Number of allocno hard registers nodes in the forest. */
584 static int allocno_hard_regs_nodes_num
;
586 /* Table preorder number of allocno hard registers node in the forest
587 -> the allocno hard registers node. */
588 static allocno_hard_regs_node_t
*allocno_hard_regs_nodes
;
591 typedef struct allocno_hard_regs_subnode
*allocno_hard_regs_subnode_t
;
/* NOTE(review): subnode descriptor and the index-table setup.  Missing
   lines include struct braces and some statements; verify upstream.  */
593 /* The structure is used to describes all subnodes (not only immediate
594 ones) in the mentioned above tree for given allocno hard register
595 node. The usage of such data accelerates calculation of
596 colorability of given allocno. */
597 struct allocno_hard_regs_subnode
599 /* The conflict size of conflicting allocnos whose hard register
600 sets are equal sets (plus supersets if given node is given
601 allocno hard registers node) of one in the given node. */
602 int left_conflict_size
;
603 /* The summary conflict size of conflicting allocnos whose hard
604 register sets are strict subsets of one in the given node.
605 Overall conflict size is
606 left_conflict_subnodes_size
607 + MIN (max_node_impact - left_conflict_subnodes_size,
610 short left_conflict_subnodes_size
;
611 short max_node_impact
;
614 /* Container for hard regs subnodes of all allocnos. */
615 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes
;
617 /* Table (preorder number of allocno hard registers node in the
618 forest, preorder number of allocno hard registers subnode) -> index
619 of the subnode relative to the node. -1 if it is not a
621 static int *allocno_hard_regs_subnode_index
;
623 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
624 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
626 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first
)
628 allocno_hard_regs_node_t node
, parent
;
631 for (node
= first
; node
!= NULL
; node
= node
->next
)
633 allocno_hard_regs_nodes
[node
->preorder_num
] = node
;
/* For every ancestor, record NODE's offset relative to that ancestor
   in the flat (num * num) index table.  */
634 for (parent
= node
; parent
!= NULL
; parent
= parent
->parent
)
636 index
= parent
->preorder_num
* allocno_hard_regs_nodes_num
;
637 allocno_hard_regs_subnode_index
[index
+ node
->preorder_num
]
638 = node
->preorder_num
- parent
->preorder_num
;
640 setup_allocno_hard_regs_subnode_index (node
->first
);
644 /* Count all allocno hard registers nodes in tree ROOT. */
646 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root
)
/* Recursive count: the `len' initialization (presumably 1, counting ROOT
   itself) and the return were dropped by the extraction — TODO confirm.  */
650 for (root
= root
->first
; root
!= NULL
; root
= root
->next
)
651 len
+= get_allocno_hard_regs_subnodes_num (root
);
/* NOTE(review): the driver that builds the whole forest.  Several loop
   headers and declarations (a, bi, temp, start) are missing from this
   extraction; verify against upstream GCC ira-color.c.  */
655 /* Build the forest of allocno hard registers nodes and assign each
656 allocno a node from the forest. */
658 form_allocno_hard_regs_nodes_forest (void)
660 unsigned int i
, j
, size
, len
;
663 allocno_hard_regs_t hv
;
666 allocno_hard_regs_node_t node
, allocno_hard_regs_node
;
667 allocno_color_data_t allocno_data
;
670 init_allocno_hard_regs ();
671 hard_regs_roots
= NULL
;
672 hard_regs_node_vec
.create (100);
/* Seed the forest with one singleton node per allocatable hard reg.  */
673 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
674 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs
, i
))
676 CLEAR_HARD_REG_SET (temp
);
677 SET_HARD_REG_BIT (temp
, i
);
678 hv
= add_allocno_hard_regs (temp
, 0);
679 node
= create_new_allocno_hard_regs_node (hv
);
680 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots
, node
);
682 start
= allocno_hard_regs_vec
.length ();
/* Register each allocno's profitable set, weighted by its spill cost.  */
683 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
686 allocno_data
= ALLOCNO_COLOR_DATA (a
);
688 if (hard_reg_set_empty_p (allocno_data
->profitable_hard_regs
))
690 hv
= (add_allocno_hard_regs
691 (allocno_data
->profitable_hard_regs
,
692 ALLOCNO_MEMORY_COST (a
) - ALLOCNO_CLASS_COST (a
)));
/* Also add the full allocatable set so the forest has one root.  */
694 SET_HARD_REG_SET (temp
);
695 AND_COMPL_HARD_REG_SET (temp
, ira_no_alloc_regs
);
696 add_allocno_hard_regs (temp
, 0);
697 qsort (allocno_hard_regs_vec
.address () + start
,
698 allocno_hard_regs_vec
.length () - start
,
699 sizeof (allocno_hard_regs_t
), allocno_hard_regs_compare
);
701 allocno_hard_regs_vec
.iterate (i
, &hv
);
704 add_allocno_hard_regs_to_forest (&hard_regs_roots
, hv
);
705 ira_assert (hard_regs_node_vec
.length () == 0);
707 /* We need to set up parent fields for right work of
708 first_common_ancestor_node. */
709 setup_allocno_hard_regs_nodes_parent (hard_regs_roots
, NULL
);
/* Map every allocno to the smallest forest node covering its
   profitable set (common ancestor of its cover nodes).  */
710 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
713 allocno_data
= ALLOCNO_COLOR_DATA (a
);
714 if (hard_reg_set_empty_p (allocno_data
->profitable_hard_regs
))
716 hard_regs_node_vec
.truncate (0);
717 collect_allocno_hard_regs_cover (hard_regs_roots
,
718 allocno_data
->profitable_hard_regs
);
719 allocno_hard_regs_node
= NULL
;
720 for (j
= 0; hard_regs_node_vec
.iterate (j
, &node
); j
++)
721 allocno_hard_regs_node
724 : first_common_ancestor_node (node
, allocno_hard_regs_node
));
725 /* That is a temporary storage. */
726 allocno_hard_regs_node
->used_p
= true;
727 allocno_data
->hard_regs_node
= allocno_hard_regs_node
;
729 ira_assert (hard_regs_roots
->next
== NULL
);
730 hard_regs_roots
->used_p
= true;
731 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots
);
732 allocno_hard_regs_nodes_num
733 = enumerate_allocno_hard_regs_nodes (hard_regs_roots
, NULL
, 0);
734 allocno_hard_regs_nodes
735 = ((allocno_hard_regs_node_t
*)
736 ira_allocate (allocno_hard_regs_nodes_num
737 * sizeof (allocno_hard_regs_node_t
)));
738 size
= allocno_hard_regs_nodes_num
* allocno_hard_regs_nodes_num
;
739 allocno_hard_regs_subnode_index
740 = (int *) ira_allocate (size
* sizeof (int));
741 for (i
= 0; i
< size
; i
++)
742 allocno_hard_regs_subnode_index
[i
] = -1;
743 setup_allocno_hard_regs_subnode_index (hard_regs_roots
);
/* Assign each allocno its slice of the shared subnodes array.  */
745 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
748 allocno_data
= ALLOCNO_COLOR_DATA (a
);
749 if (hard_reg_set_empty_p (allocno_data
->profitable_hard_regs
))
751 len
= get_allocno_hard_regs_subnodes_num (allocno_data
->hard_regs_node
);
752 allocno_data
->hard_regs_subnodes_start
= start
;
753 allocno_data
->hard_regs_subnodes_num
= len
;
756 allocno_hard_regs_subnodes
757 = ((allocno_hard_regs_subnode_t
)
758 ira_allocate (sizeof (struct allocno_hard_regs_subnode
) * start
));
759 hard_regs_node_vec
.release ();
/* NOTE(review): teardown of the forest.  The `next = child->next' /
   `next = node->next' saves and the ira_free of each node were dropped by
   the extraction — verify upstream.  */
762 /* Free tree of allocno hard registers nodes given by its ROOT. */
764 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root
)
766 allocno_hard_regs_node_t child
, next
;
768 for (child
= root
->first
; child
!= NULL
; child
= next
)
771 finish_allocno_hard_regs_nodes_tree (child
);
776 /* Finish work with the forest of allocno hard registers nodes. */
778 finish_allocno_hard_regs_nodes_forest (void)
780 allocno_hard_regs_node_t node
, next
;
782 ira_free (allocno_hard_regs_subnodes
);
783 for (node
= hard_regs_roots
; node
!= NULL
; node
= next
)
786 finish_allocno_hard_regs_nodes_tree (node
);
788 ira_free (allocno_hard_regs_nodes
);
789 ira_free (allocno_hard_regs_subnode_index
);
790 finish_allocno_hard_regs ();
/* NOTE(review): core trivial-colorability computation.  Intricate and
   order-sensitive; this extraction is missing braces, `node_check_tick++',
   parts of the conflict accounting and the `conflict_size' accumulation
   statement — do not restyle without the upstream source at hand.  */
793 /* Set up left conflict sizes and left conflict subnodes sizes of hard
794 registers subnodes of allocno A. Return TRUE if allocno A is
795 trivially colorable. */
797 setup_left_conflict_sizes_p (ira_allocno_t a
)
799 int i
, k
, nobj
, start
;
800 int conflict_size
, left_conflict_subnodes_size
, node_preorder_num
;
801 allocno_color_data_t data
;
802 HARD_REG_SET profitable_hard_regs
;
803 allocno_hard_regs_subnode_t subnodes
;
804 allocno_hard_regs_node_t node
;
805 HARD_REG_SET node_set
;
807 nobj
= ALLOCNO_NUM_OBJECTS (a
);
809 data
= ALLOCNO_COLOR_DATA (a
);
810 subnodes
= allocno_hard_regs_subnodes
+ data
->hard_regs_subnodes_start
;
811 COPY_HARD_REG_SET (profitable_hard_regs
, data
->profitable_hard_regs
);
812 node
= data
->hard_regs_node
;
813 node_preorder_num
= node
->preorder_num
;
814 COPY_HARD_REG_SET (node_set
, node
->hard_regs
->set
);
/* Pass 1: accumulate conflict sizes per forest node over all conflicting
   allocnos still in the graph.  */
816 for (k
= 0; k
< nobj
; k
++)
818 ira_object_t obj
= ALLOCNO_OBJECT (a
, k
);
819 ira_object_t conflict_obj
;
820 ira_object_conflict_iterator oci
;
822 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
825 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
826 allocno_hard_regs_node_t conflict_node
, temp_node
;
827 HARD_REG_SET conflict_node_set
;
828 allocno_color_data_t conflict_data
;
830 conflict_data
= ALLOCNO_COLOR_DATA (conflict_a
);
/* Skip conflicts already removed from the graph or with disjoint
   profitable sets.  */
831 if (! ALLOCNO_COLOR_DATA (conflict_a
)->in_graph_p
832 || ! hard_reg_set_intersect_p (profitable_hard_regs
,
834 ->profitable_hard_regs
))
836 conflict_node
= conflict_data
->hard_regs_node
;
837 COPY_HARD_REG_SET (conflict_node_set
, conflict_node
->hard_regs
->set
);
838 if (hard_reg_set_subset_p (node_set
, conflict_node_set
))
842 ira_assert (hard_reg_set_subset_p (conflict_node_set
, node_set
));
843 temp_node
= conflict_node
;
/* Lazily reset conflict_size the first time a node is touched in
   this tick.  */
845 if (temp_node
->check
!= node_check_tick
)
847 temp_node
->check
= node_check_tick
;
848 temp_node
->conflict_size
= 0;
850 size
= (ira_reg_class_max_nregs
851 [ALLOCNO_CLASS (conflict_a
)][ALLOCNO_MODE (conflict_a
)]);
852 if (ALLOCNO_NUM_OBJECTS (conflict_a
) > 1)
853 /* We will deal with the subwords individually. */
855 temp_node
->conflict_size
+= size
;
/* Pass 2: initialize per-subnode left_conflict_size/max_node_impact.  */
858 for (i
= 0; i
< data
->hard_regs_subnodes_num
; i
++)
860 allocno_hard_regs_node_t temp_node
;
862 temp_node
= allocno_hard_regs_nodes
[i
+ node_preorder_num
];
863 ira_assert (temp_node
->preorder_num
== i
+ node_preorder_num
);
864 subnodes
[i
].left_conflict_size
= (temp_node
->check
!= node_check_tick
865 ? 0 : temp_node
->conflict_size
);
866 if (hard_reg_set_subset_p (temp_node
->hard_regs
->set
,
867 profitable_hard_regs
))
868 subnodes
[i
].max_node_impact
= temp_node
->hard_regs_num
;
871 HARD_REG_SET temp_set
;
872 int j
, n
, hard_regno
;
873 enum reg_class aclass
;
875 COPY_HARD_REG_SET (temp_set
, temp_node
->hard_regs
->set
);
876 AND_HARD_REG_SET (temp_set
, profitable_hard_regs
);
877 aclass
= ALLOCNO_CLASS (a
);
/* Count profitable regs of the class inside this node's set.  */
878 for (n
= 0, j
= ira_class_hard_regs_num
[aclass
] - 1; j
>= 0; j
--)
880 hard_regno
= ira_class_hard_regs
[aclass
][j
];
881 if (TEST_HARD_REG_BIT (temp_set
, hard_regno
))
884 subnodes
[i
].max_node_impact
= n
;
886 subnodes
[i
].left_conflict_subnodes_size
= 0;
/* Pass 3: propagate subnode sizes bottom-up (reverse preorder).  */
888 start
= node_preorder_num
* allocno_hard_regs_nodes_num
;
889 for (i
= data
->hard_regs_subnodes_num
- 1; i
>= 0; i
--)
892 allocno_hard_regs_node_t parent
;
894 size
= (subnodes
[i
].left_conflict_subnodes_size
895 + MIN (subnodes
[i
].max_node_impact
896 - subnodes
[i
].left_conflict_subnodes_size
,
897 subnodes
[i
].left_conflict_size
));
898 parent
= allocno_hard_regs_nodes
[i
+ node_preorder_num
]->parent
;
902 = allocno_hard_regs_subnode_index
[start
+ parent
->preorder_num
];
905 subnodes
[parent_i
].left_conflict_subnodes_size
+= size
;
907 left_conflict_subnodes_size
= subnodes
[0].left_conflict_subnodes_size
;
909 += (left_conflict_subnodes_size
910 + MIN (subnodes
[0].max_node_impact
- left_conflict_subnodes_size
,
911 subnodes
[0].left_conflict_size
));
912 conflict_size
+= ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)];
/* Trivially colorable iff total conflict fits into available regs.  */
913 data
->colorable_p
= conflict_size
<= data
->available_regs_num
;
914 return data
->colorable_p
;
/* NOTE(review): incremental counterpart of setup_left_conflict_sizes_p.
   The extraction dropped the assignments of `before_conflict_size' /
   `conflict_size' targets, the early-exit when i < 0, and the final
   return-value computation around line 976 — verify upstream before any
   rewrite.  */
917 /* Update left conflict sizes of hard registers subnodes of allocno A
918 after removing allocno REMOVED_A with SIZE from the conflict graph.
919 Return TRUE if A is trivially colorable. */
921 update_left_conflict_sizes_p (ira_allocno_t a
,
922 ira_allocno_t removed_a
, int size
)
924 int i
, conflict_size
, before_conflict_size
, diff
, start
;
925 int node_preorder_num
, parent_i
;
926 allocno_hard_regs_node_t node
, removed_node
, parent
;
927 allocno_hard_regs_subnode_t subnodes
;
928 allocno_color_data_t data
= ALLOCNO_COLOR_DATA (a
);
930 ira_assert (! data
->colorable_p
);
931 node
= data
->hard_regs_node
;
932 node_preorder_num
= node
->preorder_num
;
933 removed_node
= ALLOCNO_COLOR_DATA (removed_a
)->hard_regs_node
;
/* The two nodes must be on one tree path, in either direction.  */
934 ira_assert (hard_reg_set_subset_p (removed_node
->hard_regs
->set
,
935 node
->hard_regs
->set
)
936 || hard_reg_set_subset_p (node
->hard_regs
->set
,
937 removed_node
->hard_regs
->set
));
938 start
= node_preorder_num
* allocno_hard_regs_nodes_num
;
939 i
= allocno_hard_regs_subnode_index
[start
+ removed_node
->preorder_num
];
942 subnodes
= allocno_hard_regs_subnodes
+ data
->hard_regs_subnodes_start
;
/* Snapshot the subnode's contribution, subtract SIZE, recompute, and
   propagate the difference up to the root subnode.  */
944 = (subnodes
[i
].left_conflict_subnodes_size
945 + MIN (subnodes
[i
].max_node_impact
946 - subnodes
[i
].left_conflict_subnodes_size
,
947 subnodes
[i
].left_conflict_size
));
948 subnodes
[i
].left_conflict_size
-= size
;
952 = (subnodes
[i
].left_conflict_subnodes_size
953 + MIN (subnodes
[i
].max_node_impact
954 - subnodes
[i
].left_conflict_subnodes_size
,
955 subnodes
[i
].left_conflict_size
))
;
956 if ((diff
= before_conflict_size
- conflict_size
) == 0)
958 ira_assert (conflict_size
< before_conflict_size
);
959 parent
= allocno_hard_regs_nodes
[i
+ node_preorder_num
]->parent
;
963 = allocno_hard_regs_subnode_index
[start
+ parent
->preorder_num
];
968 = (subnodes
[i
].left_conflict_subnodes_size
969 + MIN (subnodes
[i
].max_node_impact
970 - subnodes
[i
].left_conflict_subnodes_size
,
971 subnodes
[i
].left_conflict_size
));
972 subnodes
[i
].left_conflict_subnodes_size
-= diff
;
976 + ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]
977 > data
->available_regs_num
))
979 data
->colorable_p
= true;
/* NOTE(review): simple predicate; braces dropped by the extraction.  */
983 /* Return true if allocno A has empty profitable hard regs. */
985 empty_profitable_hard_regs (ira_allocno_t a
)
987 allocno_color_data_t data
= ALLOCNO_COLOR_DATA (a
);
989 return hard_reg_set_empty_p (data
->profitable_hard_regs
);
/* NOTE(review): three-pass computation of per-allocno profitable hard-reg
   sets.  The extraction dropped declarations (i, a, bi, costs), the
   CLEAR_HARD_REG_BIT calls' second arguments around original lines
   1060-1065, and several braces — verify against upstream before editing.  */
992 /* Set up profitable hard registers for each allocno being
995 setup_profitable_hard_regs (void)
998 int j
, k
, nobj
, hard_regno
, nregs
, class_size
;
1001 enum reg_class aclass
;
1002 enum machine_mode mode
;
1003 allocno_color_data_t data
;
1005 /* Initial set up from allocno classes and explicitly conflicting
1007 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
1009 a
= ira_allocnos
[i
];
1010 if ((aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
)
1012 data
= ALLOCNO_COLOR_DATA (a
);
/* Memory is cheaper than any register: no profitable regs at all.  */
1013 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
1014 && ALLOCNO_CLASS_COST (a
) > ALLOCNO_MEMORY_COST (a
))
1015 CLEAR_HARD_REG_SET (data
->profitable_hard_regs
);
1018 mode
= ALLOCNO_MODE (a
);
1019 COPY_HARD_REG_SET (data
->profitable_hard_regs
,
1020 ira_useful_class_mode_regs
[aclass
][mode
]);
1021 nobj
= ALLOCNO_NUM_OBJECTS (a
);
1022 for (k
= 0; k
< nobj
; k
++)
1024 ira_object_t obj
= ALLOCNO_OBJECT (a
, k
);
1026 AND_COMPL_HARD_REG_SET (data
->profitable_hard_regs
,
1027 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
1031 /* Exclude hard regs already assigned for conflicting objects. */
1032 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, i
, bi
)
1034 a
= ira_allocnos
[i
];
1035 if ((aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
1036 || ! ALLOCNO_ASSIGNED_P (a
)
1037 || (hard_regno
= ALLOCNO_HARD_REGNO (a
)) < 0)
1039 mode
= ALLOCNO_MODE (a
);
1040 nregs
= hard_regno_nregs
[hard_regno
][mode
];
1041 nobj
= ALLOCNO_NUM_OBJECTS (a
);
1042 for (k
= 0; k
< nobj
; k
++)
1044 ira_object_t obj
= ALLOCNO_OBJECT (a
, k
);
1045 ira_object_t conflict_obj
;
1046 ira_object_conflict_iterator oci
;
1048 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
1050 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
1052 /* We can process the conflict allocno repeatedly with
/* Multi-word allocno handled one subword (hence one hard reg) at a
   time; otherwise the whole mode regset is removed below.  */
1054 if (nregs
== nobj
&& nregs
> 1)
1056 int num
= OBJECT_SUBWORD (conflict_obj
);
1058 if (REG_WORDS_BIG_ENDIAN
)
1060 (ALLOCNO_COLOR_DATA (conflict_a
)->profitable_hard_regs
,
1061 hard_regno
+ nobj
- num
- 1);
1064 (ALLOCNO_COLOR_DATA (conflict_a
)->profitable_hard_regs
,
1068 AND_COMPL_HARD_REG_SET
1069 (ALLOCNO_COLOR_DATA (conflict_a
)->profitable_hard_regs
,
1070 ira_reg_mode_hard_regset
[hard_regno
][mode
]);
1074 /* Exclude too costly hard regs. */
1075 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
1077 int min_cost
= INT_MAX
;
1080 a
= ira_allocnos
[i
];
1081 if ((aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
1082 || empty_profitable_hard_regs (a
))
1084 data
= ALLOCNO_COLOR_DATA (a
);
1085 mode
= ALLOCNO_MODE (a
);
/* Prefer updated costs when present; fall back to base costs.  */
1086 if ((costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (a
)) != NULL
1087 || (costs
= ALLOCNO_HARD_REG_COSTS (a
)) != NULL
)
1089 class_size
= ira_class_hard_regs_num
[aclass
];
1090 for (j
= 0; j
< class_size
; j
++)
1092 hard_regno
= ira_class_hard_regs
[aclass
][j
];
1093 if (! TEST_HARD_REG_BIT (data
->profitable_hard_regs
,
/* Drop regs costlier than spilling to memory.  */
1096 if (ALLOCNO_UPDATED_MEMORY_COST (a
) < costs
[j
])
1097 CLEAR_HARD_REG_BIT (data
->profitable_hard_regs
,
1099 else if (min_cost
> costs
[j
])
1100 min_cost
= costs
[j
];
1103 else if (ALLOCNO_UPDATED_MEMORY_COST (a
)
1104 < ALLOCNO_UPDATED_CLASS_COST (a
))
1105 CLEAR_HARD_REG_SET (data
->profitable_hard_regs
);
1106 if (ALLOCNO_UPDATED_CLASS_COST (a
) > min_cost
)
1107 ALLOCNO_UPDATED_CLASS_COST (a
) = min_cost
;
1113 /* This page contains functions used to choose hard registers for
1116 /* Array whose element value is TRUE if the corresponding hard
1117 register was already allocated for an allocno. */
1118 static bool allocated_hardreg_p
[FIRST_PSEUDO_REGISTER
];
1120 /* Describes one element in a queue of allocnos whose costs need to be
1121 updated. Each allocno in the queue is known to have an allocno
1123 struct update_cost_queue_elem
1125 /* This element is in the queue iff CHECK == update_cost_check. */
1128 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1129 connecting this allocno to the one being allocated. */
1132 /* The next allocno in the queue, or null if this is the last element. */
1136 /* The first element in a queue of allocnos whose copy costs need to be
1137 updated. Null if the queue is empty. */
1138 static ira_allocno_t update_cost_queue
;
1140 /* The last element in the queue described by update_cost_queue.
1141 Not valid if update_cost_queue is null. */
1142 static struct update_cost_queue_elem
*update_cost_queue_tail
;
1144 /* A pool of elements in the queue described by update_cost_queue.
1145 Elements are indexed by ALLOCNO_NUM. */
1146 static struct update_cost_queue_elem
*update_cost_queue_elems
;
1148 /* The current value of update_copy_cost call count. */
1149 static int update_cost_check
;
1151 /* Allocate and initialize data necessary for function
1152 update_copy_costs. */
1154 initiate_cost_update (void)
1158 size
= ira_allocnos_num
* sizeof (struct update_cost_queue_elem
);
1159 update_cost_queue_elems
1160 = (struct update_cost_queue_elem
*) ira_allocate (size
);
1161 memset (update_cost_queue_elems
, 0, size
);
1162 update_cost_check
= 0;
1165 /* Deallocate data used by function update_copy_costs. */
1167 finish_cost_update (void)
1169 ira_free (update_cost_queue_elems
);
1172 /* When we traverse allocnos to update hard register costs, the cost
1173 divisor will be multiplied by the following macro value for each
1174 hop from given allocno to directly connected allocnos. */
1175 #define COST_HOP_DIVISOR 4
1177 /* Start a new cost-updating pass. */
1179 start_update_cost (void)
1181 update_cost_check
++;
1182 update_cost_queue
= NULL
;
1185 /* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue, unless
1186 ALLOCNO is already in the queue, or has NO_REGS class. */
1188 queue_update_cost (ira_allocno_t allocno
, int divisor
)
1190 struct update_cost_queue_elem
*elem
;
1192 elem
= &update_cost_queue_elems
[ALLOCNO_NUM (allocno
)];
1193 if (elem
->check
!= update_cost_check
1194 && ALLOCNO_CLASS (allocno
) != NO_REGS
)
1196 elem
->check
= update_cost_check
;
1197 elem
->divisor
= divisor
;
1199 if (update_cost_queue
== NULL
)
1200 update_cost_queue
= allocno
;
1202 update_cost_queue_tail
->next
= allocno
;
1203 update_cost_queue_tail
= elem
;
1207 /* Try to remove the first element from update_cost_queue. Return false
1208 if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
1209 the removed element. */
1211 get_next_update_cost (ira_allocno_t
*allocno
, int *divisor
)
1213 struct update_cost_queue_elem
*elem
;
1215 if (update_cost_queue
== NULL
)
1218 *allocno
= update_cost_queue
;
1219 elem
= &update_cost_queue_elems
[ALLOCNO_NUM (*allocno
)];
1220 *divisor
= elem
->divisor
;
1221 update_cost_queue
= elem
->next
;
1225 /* Update the cost of allocnos to increase chances to remove some
1226 copies as the result of subsequent assignment. */
1228 update_copy_costs (ira_allocno_t allocno
, bool decr_p
)
1230 int i
, cost
, update_cost
, hard_regno
, divisor
;
1231 enum machine_mode mode
;
1232 enum reg_class rclass
, aclass
;
1233 ira_allocno_t another_allocno
;
1234 ira_copy_t cp
, next_cp
;
1236 hard_regno
= ALLOCNO_HARD_REGNO (allocno
);
1237 ira_assert (hard_regno
>= 0);
1239 aclass
= ALLOCNO_CLASS (allocno
);
1240 if (aclass
== NO_REGS
)
1242 i
= ira_class_hard_reg_index
[aclass
][hard_regno
];
1243 ira_assert (i
>= 0);
1244 rclass
= REGNO_REG_CLASS (hard_regno
);
1246 start_update_cost ();
1250 mode
= ALLOCNO_MODE (allocno
);
1251 ira_init_register_move_cost_if_necessary (mode
);
1252 for (cp
= ALLOCNO_COPIES (allocno
); cp
!= NULL
; cp
= next_cp
)
1254 if (cp
->first
== allocno
)
1256 next_cp
= cp
->next_first_allocno_copy
;
1257 another_allocno
= cp
->second
;
1259 else if (cp
->second
== allocno
)
1261 next_cp
= cp
->next_second_allocno_copy
;
1262 another_allocno
= cp
->first
;
1267 aclass
= ALLOCNO_CLASS (another_allocno
);
1268 if (! TEST_HARD_REG_BIT (reg_class_contents
[aclass
],
1270 || ALLOCNO_ASSIGNED_P (another_allocno
))
1273 cost
= (cp
->second
== allocno
1274 ? ira_register_move_cost
[mode
][rclass
][aclass
]
1275 : ira_register_move_cost
[mode
][aclass
][rclass
]);
1279 update_cost
= cp
->freq
* cost
/ divisor
;
1280 if (update_cost
== 0)
1283 ira_allocate_and_set_or_copy_costs
1284 (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno
), aclass
,
1285 ALLOCNO_UPDATED_CLASS_COST (another_allocno
),
1286 ALLOCNO_HARD_REG_COSTS (another_allocno
));
1287 ira_allocate_and_set_or_copy_costs
1288 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno
),
1289 aclass
, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno
));
1290 i
= ira_class_hard_reg_index
[aclass
][hard_regno
];
1293 ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno
)[i
] += update_cost
;
1294 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno
)[i
]
1297 queue_update_cost (another_allocno
, divisor
* COST_HOP_DIVISOR
);
1300 while (get_next_update_cost (&allocno
, &divisor
));
1303 /* This function updates COSTS (decrease if DECR_P) for hard_registers
1304 of ACLASS by conflict costs of the unassigned allocnos
1305 connected by copies with allocnos in update_cost_queue. This
1306 update increases chances to remove some copies. */
1308 update_conflict_hard_regno_costs (int *costs
, enum reg_class aclass
,
1311 int i
, cost
, class_size
, freq
, mult
, div
, divisor
;
1312 int index
, hard_regno
;
1313 int *conflict_costs
;
1315 enum reg_class another_aclass
;
1316 ira_allocno_t allocno
, another_allocno
;
1317 ira_copy_t cp
, next_cp
;
1319 while (get_next_update_cost (&allocno
, &divisor
))
1320 for (cp
= ALLOCNO_COPIES (allocno
); cp
!= NULL
; cp
= next_cp
)
1322 if (cp
->first
== allocno
)
1324 next_cp
= cp
->next_first_allocno_copy
;
1325 another_allocno
= cp
->second
;
1327 else if (cp
->second
== allocno
)
1329 next_cp
= cp
->next_second_allocno_copy
;
1330 another_allocno
= cp
->first
;
1334 another_aclass
= ALLOCNO_CLASS (another_allocno
);
1335 if (! ira_reg_classes_intersect_p
[aclass
][another_aclass
]
1336 || ALLOCNO_ASSIGNED_P (another_allocno
)
1337 || ALLOCNO_COLOR_DATA (another_allocno
)->may_be_spilled_p
)
1339 class_size
= ira_class_hard_regs_num
[another_aclass
];
1340 ira_allocate_and_copy_costs
1341 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno
),
1342 another_aclass
, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno
));
1344 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno
);
1345 if (conflict_costs
== NULL
)
1350 freq
= ALLOCNO_FREQ (another_allocno
);
1353 div
= freq
* divisor
;
1355 for (i
= class_size
- 1; i
>= 0; i
--)
1357 hard_regno
= ira_class_hard_regs
[another_aclass
][i
];
1358 ira_assert (hard_regno
>= 0);
1359 index
= ira_class_hard_reg_index
[aclass
][hard_regno
];
1362 cost
= conflict_costs
[i
] * mult
/ div
;
1368 costs
[index
] += cost
;
1371 /* Probably 5 hops will be enough. */
1373 && divisor
<= (COST_HOP_DIVISOR
1376 * COST_HOP_DIVISOR
))
1377 queue_update_cost (another_allocno
, divisor
* COST_HOP_DIVISOR
);
1381 /* Set up conflicting (through CONFLICT_REGS) for each object of
1382 allocno A and the start allocno profitable regs (through
1383 START_PROFITABLE_REGS). Remember that the start profitable regs
1384 exclude hard regs which can not hold value of mode of allocno A.
1385 This covers mostly cases when multi-register value should be
1388 get_conflict_and_start_profitable_regs (ira_allocno_t a
, bool retry_p
,
1389 HARD_REG_SET
*conflict_regs
,
1390 HARD_REG_SET
*start_profitable_regs
)
1395 nwords
= ALLOCNO_NUM_OBJECTS (a
);
1396 for (i
= 0; i
< nwords
; i
++)
1398 obj
= ALLOCNO_OBJECT (a
, i
);
1399 COPY_HARD_REG_SET (conflict_regs
[i
],
1400 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
1404 COPY_HARD_REG_SET (*start_profitable_regs
,
1405 reg_class_contents
[ALLOCNO_CLASS (a
)]);
1406 AND_COMPL_HARD_REG_SET (*start_profitable_regs
,
1407 ira_prohibited_class_mode_regs
1408 [ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]);
1411 COPY_HARD_REG_SET (*start_profitable_regs
,
1412 ALLOCNO_COLOR_DATA (a
)->profitable_hard_regs
);
1415 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1416 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1418 check_hard_reg_p (ira_allocno_t a
, int hard_regno
,
1419 HARD_REG_SET
*conflict_regs
, HARD_REG_SET profitable_regs
)
1421 int j
, nwords
, nregs
;
1422 enum reg_class aclass
;
1423 enum machine_mode mode
;
1425 aclass
= ALLOCNO_CLASS (a
);
1426 mode
= ALLOCNO_MODE (a
);
1427 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs
[aclass
][mode
],
1430 /* Checking only profitable hard regs. */
1431 if (! TEST_HARD_REG_BIT (profitable_regs
, hard_regno
))
1433 nregs
= hard_regno_nregs
[hard_regno
][mode
];
1434 nwords
= ALLOCNO_NUM_OBJECTS (a
);
1435 for (j
= 0; j
< nregs
; j
++)
1438 int set_to_test_start
= 0, set_to_test_end
= nwords
;
1440 if (nregs
== nwords
)
1442 if (REG_WORDS_BIG_ENDIAN
)
1443 set_to_test_start
= nwords
- j
- 1;
1445 set_to_test_start
= j
;
1446 set_to_test_end
= set_to_test_start
+ 1;
1448 for (k
= set_to_test_start
; k
< set_to_test_end
; k
++)
1449 if (TEST_HARD_REG_BIT (conflict_regs
[k
], hard_regno
+ j
))
1451 if (k
!= set_to_test_end
)
1456 #ifndef HONOR_REG_ALLOC_ORDER
1458 /* Return number of registers needed to be saved and restored at
1459 function prologue/epilogue if we allocate HARD_REGNO to hold value
1462 calculate_saved_nregs (int hard_regno
, enum machine_mode mode
)
1467 ira_assert (hard_regno
>= 0);
1468 for (i
= hard_regno_nregs
[hard_regno
][mode
] - 1; i
>= 0; i
--)
1469 if (!allocated_hardreg_p
[hard_regno
+ i
]
1470 && !TEST_HARD_REG_BIT (call_used_reg_set
, hard_regno
+ i
)
1471 && !LOCAL_REGNO (hard_regno
+ i
))
1477 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1478 that the function called from function
1479 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1480 this case some allocno data are not defined or updated and we
1481 should not touch these data. The function returns true if we
1482 managed to assign a hard register to the allocno.
1484 To assign a hard register, first of all we calculate all conflict
1485 hard registers which can come from conflicting allocnos with
1486 already assigned hard registers. After that we find first free
1487 hard register with the minimal cost. During hard register cost
1488 calculation we take conflict hard register costs into account to
1489 give a chance for conflicting allocnos to get a better hard
1490 register in the future.
1492 If the best hard register cost is bigger than cost of memory usage
1493 for the allocno, we don't assign a hard register to given allocno
1496 If we assign a hard register to the allocno, we update costs of the
1497 hard register for allocnos connected by copies to improve a chance
1498 to coalesce insns represented by the copies when we assign hard
1499 registers to the allocnos connected by the copies. */
1501 assign_hard_reg (ira_allocno_t a
, bool retry_p
)
1503 HARD_REG_SET conflicting_regs
[2], profitable_hard_regs
;
1504 int i
, j
, hard_regno
, best_hard_regno
, class_size
;
1505 int cost
, mem_cost
, min_cost
, full_cost
, min_full_cost
, nwords
, word
;
1507 enum reg_class aclass
;
1508 enum machine_mode mode
;
1509 static int costs
[FIRST_PSEUDO_REGISTER
], full_costs
[FIRST_PSEUDO_REGISTER
];
1510 #ifndef HONOR_REG_ALLOC_ORDER
1512 enum reg_class rclass
;
1516 bool no_stack_reg_p
;
1519 ira_assert (! ALLOCNO_ASSIGNED_P (a
));
1520 get_conflict_and_start_profitable_regs (a
, retry_p
,
1522 &profitable_hard_regs
);
1523 aclass
= ALLOCNO_CLASS (a
);
1524 class_size
= ira_class_hard_regs_num
[aclass
];
1525 best_hard_regno
= -1;
1526 memset (full_costs
, 0, sizeof (int) * class_size
);
1528 memset (costs
, 0, sizeof (int) * class_size
);
1529 memset (full_costs
, 0, sizeof (int) * class_size
);
1531 no_stack_reg_p
= false;
1534 start_update_cost ();
1535 mem_cost
+= ALLOCNO_UPDATED_MEMORY_COST (a
);
1537 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a
),
1538 aclass
, ALLOCNO_HARD_REG_COSTS (a
));
1539 a_costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (a
);
1541 no_stack_reg_p
= no_stack_reg_p
|| ALLOCNO_TOTAL_NO_STACK_REG_P (a
);
1543 cost
= ALLOCNO_UPDATED_CLASS_COST (a
);
1544 for (i
= 0; i
< class_size
; i
++)
1545 if (a_costs
!= NULL
)
1547 costs
[i
] += a_costs
[i
];
1548 full_costs
[i
] += a_costs
[i
];
1553 full_costs
[i
] += cost
;
1555 nwords
= ALLOCNO_NUM_OBJECTS (a
);
1556 curr_allocno_process
++;
1557 for (word
= 0; word
< nwords
; word
++)
1559 ira_object_t conflict_obj
;
1560 ira_object_t obj
= ALLOCNO_OBJECT (a
, word
);
1561 ira_object_conflict_iterator oci
;
1563 /* Take preferences of conflicting allocnos into account. */
1564 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
1566 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
1567 enum reg_class conflict_aclass
;
1569 /* Reload can give another class so we need to check all
1572 && (!bitmap_bit_p (consideration_allocno_bitmap
,
1573 ALLOCNO_NUM (conflict_a
))
1574 || ((!ALLOCNO_ASSIGNED_P (conflict_a
)
1575 || ALLOCNO_HARD_REGNO (conflict_a
) < 0)
1576 && !(hard_reg_set_intersect_p
1577 (profitable_hard_regs
,
1579 (conflict_a
)->profitable_hard_regs
)))))
1581 conflict_aclass
= ALLOCNO_CLASS (conflict_a
);
1582 ira_assert (ira_reg_classes_intersect_p
1583 [aclass
][conflict_aclass
]);
1584 if (ALLOCNO_ASSIGNED_P (conflict_a
))
1586 hard_regno
= ALLOCNO_HARD_REGNO (conflict_a
);
1588 && (ira_hard_reg_set_intersection_p
1589 (hard_regno
, ALLOCNO_MODE (conflict_a
),
1590 reg_class_contents
[aclass
])))
1592 int n_objects
= ALLOCNO_NUM_OBJECTS (conflict_a
);
1595 mode
= ALLOCNO_MODE (conflict_a
);
1596 conflict_nregs
= hard_regno_nregs
[hard_regno
][mode
];
1597 if (conflict_nregs
== n_objects
&& conflict_nregs
> 1)
1599 int num
= OBJECT_SUBWORD (conflict_obj
);
1601 if (REG_WORDS_BIG_ENDIAN
)
1602 SET_HARD_REG_BIT (conflicting_regs
[word
],
1603 hard_regno
+ n_objects
- num
- 1);
1605 SET_HARD_REG_BIT (conflicting_regs
[word
],
1610 (conflicting_regs
[word
],
1611 ira_reg_mode_hard_regset
[hard_regno
][mode
]);
1612 if (hard_reg_set_subset_p (profitable_hard_regs
,
1613 conflicting_regs
[word
]))
1618 && ! ALLOCNO_COLOR_DATA (conflict_a
)->may_be_spilled_p
1619 /* Don't process the conflict allocno twice. */
1620 && (ALLOCNO_COLOR_DATA (conflict_a
)->last_process
1621 != curr_allocno_process
))
1623 int k
, *conflict_costs
;
1625 ALLOCNO_COLOR_DATA (conflict_a
)->last_process
1626 = curr_allocno_process
;
1627 ira_allocate_and_copy_costs
1628 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a
),
1630 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a
));
1632 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a
);
1633 if (conflict_costs
!= NULL
)
1634 for (j
= class_size
- 1; j
>= 0; j
--)
1636 hard_regno
= ira_class_hard_regs
[aclass
][j
];
1637 ira_assert (hard_regno
>= 0);
1638 k
= ira_class_hard_reg_index
[conflict_aclass
][hard_regno
];
1641 full_costs
[j
] -= conflict_costs
[k
];
1643 queue_update_cost (conflict_a
, COST_HOP_DIVISOR
);
1648 /* Take into account preferences of allocnos connected by copies to
1649 the conflict allocnos. */
1650 update_conflict_hard_regno_costs (full_costs
, aclass
, true);
1652 /* Take preferences of allocnos connected by copies into
1656 start_update_cost ();
1657 queue_update_cost (a
, COST_HOP_DIVISOR
);
1658 update_conflict_hard_regno_costs (full_costs
, aclass
, false);
1660 min_cost
= min_full_cost
= INT_MAX
;
1661 /* We don't care about giving callee saved registers to allocnos no
1662 living through calls because call clobbered registers are
1663 allocated first (it is usual practice to put them first in
1664 REG_ALLOC_ORDER). */
1665 mode
= ALLOCNO_MODE (a
);
1666 for (i
= 0; i
< class_size
; i
++)
1668 hard_regno
= ira_class_hard_regs
[aclass
][i
];
1671 && FIRST_STACK_REG
<= hard_regno
&& hard_regno
<= LAST_STACK_REG
)
1674 if (! check_hard_reg_p (a
, hard_regno
,
1675 conflicting_regs
, profitable_hard_regs
))
1678 full_cost
= full_costs
[i
];
1679 #ifndef HONOR_REG_ALLOC_ORDER
1680 if ((saved_nregs
= calculate_saved_nregs (hard_regno
, mode
)) != 0)
1681 /* We need to save/restore the hard register in
1682 epilogue/prologue. Therefore we increase the cost. */
1684 rclass
= REGNO_REG_CLASS (hard_regno
);
1685 add_cost
= ((ira_memory_move_cost
[mode
][rclass
][0]
1686 + ira_memory_move_cost
[mode
][rclass
][1])
1687 * saved_nregs
/ hard_regno_nregs
[hard_regno
][mode
] - 1);
1689 full_cost
+= add_cost
;
1692 if (min_cost
> cost
)
1694 if (min_full_cost
> full_cost
)
1696 min_full_cost
= full_cost
;
1697 best_hard_regno
= hard_regno
;
1698 ira_assert (hard_regno
>= 0);
1701 if (min_full_cost
> mem_cost
)
1703 if (! retry_p
&& internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
1704 fprintf (ira_dump_file
, "(memory is more profitable %d vs %d) ",
1705 mem_cost
, min_full_cost
);
1706 best_hard_regno
= -1;
1709 if (best_hard_regno
>= 0)
1711 for (i
= hard_regno_nregs
[best_hard_regno
][mode
] - 1; i
>= 0; i
--)
1712 allocated_hardreg_p
[best_hard_regno
+ i
] = true;
1714 ALLOCNO_HARD_REGNO (a
) = best_hard_regno
;
1715 ALLOCNO_ASSIGNED_P (a
) = true;
1716 if (best_hard_regno
>= 0)
1717 update_copy_costs (a
, true);
1718 ira_assert (ALLOCNO_CLASS (a
) == aclass
);
1719 /* We don't need updated costs anymore: */
1720 ira_free_allocno_updated_costs (a
);
1721 return best_hard_regno
>= 0;
1726 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
1728 /* Bucket of allocnos that can colored currently without spilling. */
1729 static ira_allocno_t colorable_allocno_bucket
;
1731 /* Bucket of allocnos that might be not colored currently without
1733 static ira_allocno_t uncolorable_allocno_bucket
;
1735 /* The current number of allocnos in the uncolorable_bucket. */
1736 static int uncolorable_allocnos_num
;
1738 /* Return the current spill priority of allocno A. The less the
1739 number, the more preferable the allocno for spilling. */
1741 allocno_spill_priority (ira_allocno_t a
)
1743 allocno_color_data_t data
= ALLOCNO_COLOR_DATA (a
);
1746 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
)
1747 * ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]
1751 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
1754 add_allocno_to_bucket (ira_allocno_t a
, ira_allocno_t
*bucket_ptr
)
1756 ira_allocno_t first_a
;
1757 allocno_color_data_t data
;
1759 if (bucket_ptr
== &uncolorable_allocno_bucket
1760 && ALLOCNO_CLASS (a
) != NO_REGS
)
1762 uncolorable_allocnos_num
++;
1763 ira_assert (uncolorable_allocnos_num
> 0);
1765 first_a
= *bucket_ptr
;
1766 data
= ALLOCNO_COLOR_DATA (a
);
1767 data
->next_bucket_allocno
= first_a
;
1768 data
->prev_bucket_allocno
= NULL
;
1769 if (first_a
!= NULL
)
1770 ALLOCNO_COLOR_DATA (first_a
)->prev_bucket_allocno
= a
;
1774 /* Compare two allocnos to define which allocno should be pushed first
1775 into the coloring stack. If the return is a negative number, the
1776 allocno given by the first parameter will be pushed first. In this
1777 case such allocno has less priority than the second one and the
1778 hard register will be assigned to it after assignment to the second
1779 one. As the result of such assignment order, the second allocno
1780 has a better chance to get the best hard register. */
1782 bucket_allocno_compare_func (const void *v1p
, const void *v2p
)
1784 ira_allocno_t a1
= *(const ira_allocno_t
*) v1p
;
1785 ira_allocno_t a2
= *(const ira_allocno_t
*) v2p
;
1786 int diff
, a1_freq
, a2_freq
, a1_num
, a2_num
;
1787 int cl1
= ALLOCNO_CLASS (a1
), cl2
= ALLOCNO_CLASS (a2
);
1789 /* Push pseudos requiring less hard registers first. It means that
1790 we will assign pseudos requiring more hard registers first
1791 avoiding creation small holes in free hard register file into
1792 which the pseudos requiring more hard registers can not fit. */
1793 if ((diff
= (ira_reg_class_max_nregs
[cl1
][ALLOCNO_MODE (a1
)]
1794 - ira_reg_class_max_nregs
[cl2
][ALLOCNO_MODE (a2
)])) != 0)
1796 a1_freq
= ALLOCNO_FREQ (a1
);
1797 a2_freq
= ALLOCNO_FREQ (a2
);
1798 if ((diff
= a1_freq
- a2_freq
) != 0)
1800 a1_num
= ALLOCNO_COLOR_DATA (a1
)->available_regs_num
;
1801 a2_num
= ALLOCNO_COLOR_DATA (a2
)->available_regs_num
;
1802 if ((diff
= a2_num
- a1_num
) != 0)
1804 return ALLOCNO_NUM (a2
) - ALLOCNO_NUM (a1
);
1807 /* Sort bucket *BUCKET_PTR and return the result through
1810 sort_bucket (ira_allocno_t
*bucket_ptr
,
1811 int (*compare_func
) (const void *, const void *))
1813 ira_allocno_t a
, head
;
1816 for (n
= 0, a
= *bucket_ptr
;
1818 a
= ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
)
1819 sorted_allocnos
[n
++] = a
;
1822 qsort (sorted_allocnos
, n
, sizeof (ira_allocno_t
), compare_func
);
1824 for (n
--; n
>= 0; n
--)
1826 a
= sorted_allocnos
[n
];
1827 ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
= head
;
1828 ALLOCNO_COLOR_DATA (a
)->prev_bucket_allocno
= NULL
;
1830 ALLOCNO_COLOR_DATA (head
)->prev_bucket_allocno
= a
;
1836 /* Add ALLOCNO to bucket *BUCKET_PTR maintaining the order according
1837 their priority. ALLOCNO should be not in a bucket before the
1840 add_allocno_to_ordered_bucket (ira_allocno_t allocno
,
1841 ira_allocno_t
*bucket_ptr
)
1843 ira_allocno_t before
, after
;
1845 if (bucket_ptr
== &uncolorable_allocno_bucket
1846 && ALLOCNO_CLASS (allocno
) != NO_REGS
)
1848 uncolorable_allocnos_num
++;
1849 ira_assert (uncolorable_allocnos_num
> 0);
1851 for (before
= *bucket_ptr
, after
= NULL
;
1854 before
= ALLOCNO_COLOR_DATA (before
)->next_bucket_allocno
)
1855 if (bucket_allocno_compare_func (&allocno
, &before
) < 0)
1857 ALLOCNO_COLOR_DATA (allocno
)->next_bucket_allocno
= before
;
1858 ALLOCNO_COLOR_DATA (allocno
)->prev_bucket_allocno
= after
;
1860 *bucket_ptr
= allocno
;
1862 ALLOCNO_COLOR_DATA (after
)->next_bucket_allocno
= allocno
;
1864 ALLOCNO_COLOR_DATA (before
)->prev_bucket_allocno
= allocno
;
1867 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
1870 delete_allocno_from_bucket (ira_allocno_t allocno
, ira_allocno_t
*bucket_ptr
)
1872 ira_allocno_t prev_allocno
, next_allocno
;
1874 if (bucket_ptr
== &uncolorable_allocno_bucket
1875 && ALLOCNO_CLASS (allocno
) != NO_REGS
)
1877 uncolorable_allocnos_num
--;
1878 ira_assert (uncolorable_allocnos_num
>= 0);
1880 prev_allocno
= ALLOCNO_COLOR_DATA (allocno
)->prev_bucket_allocno
;
1881 next_allocno
= ALLOCNO_COLOR_DATA (allocno
)->next_bucket_allocno
;
1882 if (prev_allocno
!= NULL
)
1883 ALLOCNO_COLOR_DATA (prev_allocno
)->next_bucket_allocno
= next_allocno
;
1886 ira_assert (*bucket_ptr
== allocno
);
1887 *bucket_ptr
= next_allocno
;
1889 if (next_allocno
!= NULL
)
1890 ALLOCNO_COLOR_DATA (next_allocno
)->prev_bucket_allocno
= prev_allocno
;
1893 /* Put allocno A onto the coloring stack without removing it from its
1894 bucket. Pushing allocno to the coloring stack can result in moving
1895 conflicting allocnos from the uncolorable bucket to the colorable
1898 push_allocno_to_stack (ira_allocno_t a
)
1900 enum reg_class aclass
;
1901 allocno_color_data_t data
, conflict_data
;
1902 int size
, i
, n
= ALLOCNO_NUM_OBJECTS (a
);
1904 data
= ALLOCNO_COLOR_DATA (a
);
1905 data
->in_graph_p
= false;
1906 allocno_stack_vec
.safe_push (a
);
1907 aclass
= ALLOCNO_CLASS (a
);
1908 if (aclass
== NO_REGS
)
1910 size
= ira_reg_class_max_nregs
[aclass
][ALLOCNO_MODE (a
)];
1913 /* We will deal with the subwords individually. */
1914 gcc_assert (size
== ALLOCNO_NUM_OBJECTS (a
));
1917 for (i
= 0; i
< n
; i
++)
1919 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
1920 ira_object_t conflict_obj
;
1921 ira_object_conflict_iterator oci
;
1923 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
1925 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
1927 conflict_data
= ALLOCNO_COLOR_DATA (conflict_a
);
1928 if (conflict_data
->colorable_p
1929 || ! conflict_data
->in_graph_p
1930 || ALLOCNO_ASSIGNED_P (conflict_a
)
1931 || !(hard_reg_set_intersect_p
1932 (ALLOCNO_COLOR_DATA (a
)->profitable_hard_regs
,
1933 conflict_data
->profitable_hard_regs
)))
1935 ira_assert (bitmap_bit_p (coloring_allocno_bitmap
,
1936 ALLOCNO_NUM (conflict_a
)));
1937 if (update_left_conflict_sizes_p (conflict_a
, a
, size
))
1939 delete_allocno_from_bucket
1940 (conflict_a
, &uncolorable_allocno_bucket
);
1941 add_allocno_to_ordered_bucket
1942 (conflict_a
, &colorable_allocno_bucket
);
1943 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
1945 fprintf (ira_dump_file
, " Making");
1946 ira_print_expanded_allocno (conflict_a
);
1947 fprintf (ira_dump_file
, " colorable\n");
1955 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
1956 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
1958 remove_allocno_from_bucket_and_push (ira_allocno_t allocno
, bool colorable_p
)
1961 delete_allocno_from_bucket (allocno
, &colorable_allocno_bucket
);
1963 delete_allocno_from_bucket (allocno
, &uncolorable_allocno_bucket
);
1964 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
1966 fprintf (ira_dump_file
, " Pushing");
1967 ira_print_expanded_allocno (allocno
);
1969 fprintf (ira_dump_file
, "(cost %d)\n",
1970 ALLOCNO_COLOR_DATA (allocno
)->temp
);
1972 fprintf (ira_dump_file
, "(potential spill: %spri=%d, cost=%d)\n",
1973 ALLOCNO_BAD_SPILL_P (allocno
) ? "bad spill, " : "",
1974 allocno_spill_priority (allocno
),
1975 ALLOCNO_COLOR_DATA (allocno
)->temp
);
1978 ALLOCNO_COLOR_DATA (allocno
)->may_be_spilled_p
= true;
1979 push_allocno_to_stack (allocno
);
1982 /* Put all allocnos from colorable bucket onto the coloring stack. */
1984 push_only_colorable (void)
1986 sort_bucket (&colorable_allocno_bucket
, bucket_allocno_compare_func
);
1987 for (;colorable_allocno_bucket
!= NULL
;)
1988 remove_allocno_from_bucket_and_push (colorable_allocno_bucket
, true);
1991 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
1992 loop given by its LOOP_NODE. */
1994 ira_loop_edge_freq (ira_loop_tree_node_t loop_node
, int regno
, bool exit_p
)
2001 ira_assert (current_loops
!= NULL
&& loop_node
->loop
!= NULL
2002 && (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
));
2006 FOR_EACH_EDGE (e
, ei
, loop_node
->loop
->header
->preds
)
2007 if (e
->src
!= loop_node
->loop
->latch
2009 || (bitmap_bit_p (df_get_live_out (e
->src
), regno
)
2010 && bitmap_bit_p (df_get_live_in (e
->dest
), regno
))))
2011 freq
+= EDGE_FREQUENCY (e
);
2015 edges
= get_loop_exit_edges (loop_node
->loop
);
2016 FOR_EACH_VEC_ELT (edges
, i
, e
)
2018 || (bitmap_bit_p (df_get_live_out (e
->src
), regno
)
2019 && bitmap_bit_p (df_get_live_in (e
->dest
), regno
)))
2020 freq
+= EDGE_FREQUENCY (e
);
2024 return REG_FREQ_FROM_EDGE_FREQ (freq
);
2027 /* Calculate and return the cost of putting allocno A into memory. */
2029 calculate_allocno_spill_cost (ira_allocno_t a
)
2032 enum machine_mode mode
;
2033 enum reg_class rclass
;
2034 ira_allocno_t parent_allocno
;
2035 ira_loop_tree_node_t parent_node
, loop_node
;
2037 regno
= ALLOCNO_REGNO (a
);
2038 cost
= ALLOCNO_UPDATED_MEMORY_COST (a
) - ALLOCNO_UPDATED_CLASS_COST (a
);
2039 if (ALLOCNO_CAP (a
) != NULL
)
2041 loop_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2042 if ((parent_node
= loop_node
->parent
) == NULL
)
2044 if ((parent_allocno
= parent_node
->regno_allocno_map
[regno
]) == NULL
)
2046 mode
= ALLOCNO_MODE (a
);
2047 rclass
= ALLOCNO_CLASS (a
);
2048 if (ALLOCNO_HARD_REGNO (parent_allocno
) < 0)
2049 cost
-= (ira_memory_move_cost
[mode
][rclass
][0]
2050 * ira_loop_edge_freq (loop_node
, regno
, true)
2051 + ira_memory_move_cost
[mode
][rclass
][1]
2052 * ira_loop_edge_freq (loop_node
, regno
, false));
2055 ira_init_register_move_cost_if_necessary (mode
);
2056 cost
+= ((ira_memory_move_cost
[mode
][rclass
][1]
2057 * ira_loop_edge_freq (loop_node
, regno
, true)
2058 + ira_memory_move_cost
[mode
][rclass
][0]
2059 * ira_loop_edge_freq (loop_node
, regno
, false))
2060 - (ira_register_move_cost
[mode
][rclass
][rclass
]
2061 * (ira_loop_edge_freq (loop_node
, regno
, false)
2062 + ira_loop_edge_freq (loop_node
, regno
, true))));
2067 /* Used for sorting allocnos for spilling. */
2069 allocno_spill_priority_compare (ira_allocno_t a1
, ira_allocno_t a2
)
2071 int pri1
, pri2
, diff
;
2073 if (ALLOCNO_BAD_SPILL_P (a1
) && ! ALLOCNO_BAD_SPILL_P (a2
))
2075 if (ALLOCNO_BAD_SPILL_P (a2
) && ! ALLOCNO_BAD_SPILL_P (a1
))
2077 pri1
= allocno_spill_priority (a1
);
2078 pri2
= allocno_spill_priority (a2
);
2079 if ((diff
= pri1
- pri2
) != 0)
2082 = ALLOCNO_COLOR_DATA (a1
)->temp
- ALLOCNO_COLOR_DATA (a2
)->temp
) != 0)
2084 return ALLOCNO_NUM (a1
) - ALLOCNO_NUM (a2
);
2087 /* Used for sorting allocnos for spilling. */
2089 allocno_spill_sort_compare (const void *v1p
, const void *v2p
)
2091 ira_allocno_t p1
= *(const ira_allocno_t
*) v1p
;
2092 ira_allocno_t p2
= *(const ira_allocno_t
*) v2p
;
2094 return allocno_spill_priority_compare (p1
, p2
);
2097 /* Push allocnos to the coloring stack. The order of allocnos in the
2098 stack defines the order for the subsequent coloring. */
2100 push_allocnos_to_stack (void)
2105 /* Calculate uncolorable allocno spill costs. */
2106 for (a
= uncolorable_allocno_bucket
;
2108 a
= ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
)
2109 if (ALLOCNO_CLASS (a
) != NO_REGS
)
2111 cost
= calculate_allocno_spill_cost (a
);
2112 /* ??? Remove cost of copies between the coalesced
2114 ALLOCNO_COLOR_DATA (a
)->temp
= cost
;
2116 sort_bucket (&uncolorable_allocno_bucket
, allocno_spill_sort_compare
);
2119 push_only_colorable ();
2120 a
= uncolorable_allocno_bucket
;
2123 remove_allocno_from_bucket_and_push (a
, false);
2125 ira_assert (colorable_allocno_bucket
== NULL
2126 && uncolorable_allocno_bucket
== NULL
);
2127 ira_assert (uncolorable_allocnos_num
== 0);
2130 /* Pop the coloring stack and assign hard registers to the popped
2133 pop_allocnos_from_stack (void)
2135 ira_allocno_t allocno
;
2136 enum reg_class aclass
;
2138 for (;allocno_stack_vec
.length () != 0;)
2140 allocno
= allocno_stack_vec
.pop ();
2141 aclass
= ALLOCNO_CLASS (allocno
);
2142 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2144 fprintf (ira_dump_file
, " Popping");
2145 ira_print_expanded_allocno (allocno
);
2146 fprintf (ira_dump_file
, " -- ");
2148 if (aclass
== NO_REGS
)
2150 ALLOCNO_HARD_REGNO (allocno
) = -1;
2151 ALLOCNO_ASSIGNED_P (allocno
) = true;
2152 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno
) == NULL
);
2154 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno
) == NULL
);
2155 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2156 fprintf (ira_dump_file
, "assign memory\n");
2158 else if (assign_hard_reg (allocno
, false))
2160 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2161 fprintf (ira_dump_file
, "assign reg %d\n",
2162 ALLOCNO_HARD_REGNO (allocno
));
2164 else if (ALLOCNO_ASSIGNED_P (allocno
))
2166 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2167 fprintf (ira_dump_file
, "spill\n");
2169 ALLOCNO_COLOR_DATA (allocno
)->in_graph_p
= true;
2173 /* Set up number of available hard registers for allocno A. */
2175 setup_allocno_available_regs_num (ira_allocno_t a
)
2177 int i
, n
, hard_regno
, hard_regs_num
, nwords
;
2178 enum reg_class aclass
;
2179 allocno_color_data_t data
;
2181 aclass
= ALLOCNO_CLASS (a
);
2182 data
= ALLOCNO_COLOR_DATA (a
);
2183 data
->available_regs_num
= 0;
2184 if (aclass
== NO_REGS
)
2186 hard_regs_num
= ira_class_hard_regs_num
[aclass
];
2187 nwords
= ALLOCNO_NUM_OBJECTS (a
);
2188 for (n
= 0, i
= hard_regs_num
- 1; i
>= 0; i
--)
2190 hard_regno
= ira_class_hard_regs
[aclass
][i
];
2191 /* Checking only profitable hard regs. */
2192 if (TEST_HARD_REG_BIT (data
->profitable_hard_regs
, hard_regno
))
2195 data
->available_regs_num
= n
;
2196 if (internal_flag_ira_verbose
<= 2 || ira_dump_file
== NULL
)
2200 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2201 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
),
2202 reg_class_names
[aclass
], ira_class_hard_regs_num
[aclass
], n
);
2203 print_hard_reg_set (ira_dump_file
, data
->profitable_hard_regs
, false);
2204 fprintf (ira_dump_file
, ", %snode: ",
2205 hard_reg_set_equal_p (data
->profitable_hard_regs
,
2206 data
->hard_regs_node
->hard_regs
->set
)
2208 print_hard_reg_set (ira_dump_file
,
2209 data
->hard_regs_node
->hard_regs
->set
, false);
2210 for (i
= 0; i
< nwords
; i
++)
2212 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
2217 fprintf (ira_dump_file
, ", ");
2218 fprintf (ira_dump_file
, " obj %d", i
);
2220 fprintf (ira_dump_file
, " (confl regs = ");
2221 print_hard_reg_set (ira_dump_file
, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
2223 fprintf (ira_dump_file
, ")");
2225 fprintf (ira_dump_file
, "\n");
2228 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2229 conflicting allocnos and hard registers. */
2231 put_allocno_into_bucket (ira_allocno_t allocno
)
2233 ALLOCNO_COLOR_DATA (allocno
)->in_graph_p
= true;
2234 setup_allocno_available_regs_num (allocno
);
2235 if (setup_left_conflict_sizes_p (allocno
))
2236 add_allocno_to_bucket (allocno
, &colorable_allocno_bucket
);
2238 add_allocno_to_bucket (allocno
, &uncolorable_allocno_bucket
);
2241 /* Map: allocno number -> allocno priority. */
2242 static int *allocno_priorities
;
2244 /* Set up priorities for N allocnos in array
2245 CONSIDERATION_ALLOCNOS. */
2247 setup_allocno_priorities (ira_allocno_t
*consideration_allocnos
, int n
)
2249 int i
, length
, nrefs
, priority
, max_priority
, mult
;
2253 for (i
= 0; i
< n
; i
++)
2255 a
= consideration_allocnos
[i
];
2256 nrefs
= ALLOCNO_NREFS (a
);
2257 ira_assert (nrefs
>= 0);
2258 mult
= floor_log2 (ALLOCNO_NREFS (a
)) + 1;
2259 ira_assert (mult
>= 0);
2260 allocno_priorities
[ALLOCNO_NUM (a
)]
2263 * (ALLOCNO_MEMORY_COST (a
) - ALLOCNO_CLASS_COST (a
))
2264 * ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]);
2266 priority
= -priority
;
2267 if (max_priority
< priority
)
2268 max_priority
= priority
;
2270 mult
= max_priority
== 0 ? 1 : INT_MAX
/ max_priority
;
2271 for (i
= 0; i
< n
; i
++)
2273 a
= consideration_allocnos
[i
];
2274 length
= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
2275 if (ALLOCNO_NUM_OBJECTS (a
) > 1)
2276 length
/= ALLOCNO_NUM_OBJECTS (a
);
2279 allocno_priorities
[ALLOCNO_NUM (a
)]
2280 = allocno_priorities
[ALLOCNO_NUM (a
)] * mult
/ length
;
2284 /* Sort allocnos according to the profit of usage of a hard register
2285 instead of memory for them. */
2287 allocno_cost_compare_func (const void *v1p
, const void *v2p
)
2289 ira_allocno_t p1
= *(const ira_allocno_t
*) v1p
;
2290 ira_allocno_t p2
= *(const ira_allocno_t
*) v2p
;
2293 c1
= ALLOCNO_UPDATED_MEMORY_COST (p1
) - ALLOCNO_UPDATED_CLASS_COST (p1
);
2294 c2
= ALLOCNO_UPDATED_MEMORY_COST (p2
) - ALLOCNO_UPDATED_CLASS_COST (p2
);
2298 /* If regs are equally good, sort by allocno numbers, so that the
2299 results of qsort leave nothing to chance. */
2300 return ALLOCNO_NUM (p1
) - ALLOCNO_NUM (p2
);
2303 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2304 possible to hard registers. Let us try to improve allocation with
2305 cost point of view. This function improves the allocation by
2306 spilling some allocnos and assigning the freed hard registers to
2307 other allocnos if it decreases the overall allocation cost. */
2309 improve_allocation (void)
2312 int j
, k
, n
, hregno
, conflict_hregno
, base_cost
, class_size
, word
, nwords
;
2313 int check
, spill_cost
, min_cost
, nregs
, conflict_nregs
, r
, best
;
2315 enum reg_class aclass
;
2316 enum machine_mode mode
;
2318 int costs
[FIRST_PSEUDO_REGISTER
];
2319 HARD_REG_SET conflicting_regs
[2], profitable_hard_regs
;
2323 /* Clear counts used to process conflicting allocnos only once for
2325 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2326 ALLOCNO_COLOR_DATA (ira_allocnos
[i
])->temp
= 0;
2328 /* Process each allocno and try to assign a hard register to it by
2329 spilling some its conflicting allocnos. */
2330 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2332 a
= ira_allocnos
[i
];
2333 ALLOCNO_COLOR_DATA (a
)->temp
= 0;
2334 if (empty_profitable_hard_regs (a
))
2337 aclass
= ALLOCNO_CLASS (a
);
2338 allocno_costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (a
);
2339 if (allocno_costs
== NULL
)
2340 allocno_costs
= ALLOCNO_HARD_REG_COSTS (a
);
2341 if ((hregno
= ALLOCNO_HARD_REGNO (a
)) < 0)
2342 base_cost
= ALLOCNO_UPDATED_MEMORY_COST (a
);
2343 else if (allocno_costs
== NULL
)
2344 /* It means that assigning a hard register is not profitable
2345 (we don't waste memory for hard register costs in this
2349 base_cost
= allocno_costs
[ira_class_hard_reg_index
[aclass
][hregno
]];
2351 get_conflict_and_start_profitable_regs (a
, false,
2353 &profitable_hard_regs
);
2354 class_size
= ira_class_hard_regs_num
[aclass
];
2355 /* Set up cost improvement for usage of each profitable hard
2356 register for allocno A. */
2357 for (j
= 0; j
< class_size
; j
++)
2359 hregno
= ira_class_hard_regs
[aclass
][j
];
2360 if (! check_hard_reg_p (a
, hregno
,
2361 conflicting_regs
, profitable_hard_regs
))
2363 ira_assert (ira_class_hard_reg_index
[aclass
][hregno
] == j
);
2364 k
= allocno_costs
== NULL
? 0 : j
;
2365 costs
[hregno
] = (allocno_costs
== NULL
2366 ? ALLOCNO_UPDATED_CLASS_COST (a
) : allocno_costs
[k
]);
2367 costs
[hregno
] -= base_cost
;
2368 if (costs
[hregno
] < 0)
2372 /* There is no chance to improve the allocation cost by
2373 assigning hard register to allocno A even without spilling
2374 conflicting allocnos. */
2376 mode
= ALLOCNO_MODE (a
);
2377 nwords
= ALLOCNO_NUM_OBJECTS (a
);
2378 /* Process each allocno conflicting with A and update the cost
2379 improvement for profitable hard registers of A. To use a
2380 hard register for A we need to spill some conflicting
2381 allocnos and that creates penalty for the cost
2383 for (word
= 0; word
< nwords
; word
++)
2385 ira_object_t conflict_obj
;
2386 ira_object_t obj
= ALLOCNO_OBJECT (a
, word
);
2387 ira_object_conflict_iterator oci
;
2389 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
2391 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
2393 if (ALLOCNO_COLOR_DATA (conflict_a
)->temp
== check
)
2394 /* We already processed this conflicting allocno
2395 because we processed earlier another object of the
2396 conflicting allocno. */
2398 ALLOCNO_COLOR_DATA (conflict_a
)->temp
= check
;
2399 if ((conflict_hregno
= ALLOCNO_HARD_REGNO (conflict_a
)) < 0)
2401 spill_cost
= ALLOCNO_UPDATED_MEMORY_COST (conflict_a
);
2402 k
= (ira_class_hard_reg_index
2403 [ALLOCNO_CLASS (conflict_a
)][conflict_hregno
]);
2404 ira_assert (k
>= 0);
2405 if ((allocno_costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a
))
2407 spill_cost
-= allocno_costs
[k
];
2408 else if ((allocno_costs
= ALLOCNO_HARD_REG_COSTS (conflict_a
))
2410 spill_cost
-= allocno_costs
[k
];
2412 spill_cost
-= ALLOCNO_UPDATED_CLASS_COST (conflict_a
);
2414 = hard_regno_nregs
[conflict_hregno
][ALLOCNO_MODE (conflict_a
)];
2415 for (r
= conflict_hregno
;
2416 r
>= 0 && r
+ hard_regno_nregs
[r
][mode
] > conflict_hregno
;
2418 if (check_hard_reg_p (a
, r
,
2419 conflicting_regs
, profitable_hard_regs
))
2420 costs
[r
] += spill_cost
;
2421 for (r
= conflict_hregno
+ 1;
2422 r
< conflict_hregno
+ conflict_nregs
;
2424 if (check_hard_reg_p (a
, r
,
2425 conflicting_regs
, profitable_hard_regs
))
2426 costs
[r
] += spill_cost
;
2431 /* Now we choose hard register for A which results in highest
2432 allocation cost improvement. */
2433 for (j
= 0; j
< class_size
; j
++)
2435 hregno
= ira_class_hard_regs
[aclass
][j
];
2436 if (check_hard_reg_p (a
, hregno
,
2437 conflicting_regs
, profitable_hard_regs
)
2438 && min_cost
> costs
[hregno
])
2441 min_cost
= costs
[hregno
];
2445 /* We are in a situation when assigning any hard register to A
2446 by spilling some conflicting allocnos does not improve the
2449 nregs
= hard_regno_nregs
[best
][mode
];
2450 /* Now spill conflicting allocnos which contain a hard register
2451 of A when we assign the best chosen hard register to it. */
2452 for (word
= 0; word
< nwords
; word
++)
2454 ira_object_t conflict_obj
;
2455 ira_object_t obj
= ALLOCNO_OBJECT (a
, word
);
2456 ira_object_conflict_iterator oci
;
2458 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
2460 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
2462 if ((conflict_hregno
= ALLOCNO_HARD_REGNO (conflict_a
)) < 0)
2465 = hard_regno_nregs
[conflict_hregno
][ALLOCNO_MODE (conflict_a
)];
2466 if (best
+ nregs
<= conflict_hregno
2467 || conflict_hregno
+ conflict_nregs
<= best
)
2468 /* No intersection. */
2470 ALLOCNO_HARD_REGNO (conflict_a
) = -1;
2471 sorted_allocnos
[n
++] = conflict_a
;
2472 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
2473 fprintf (ira_dump_file
, "Spilling a%dr%d for a%dr%d\n",
2474 ALLOCNO_NUM (conflict_a
), ALLOCNO_REGNO (conflict_a
),
2475 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
2478 /* Assign the best chosen hard register to A. */
2479 ALLOCNO_HARD_REGNO (a
) = best
;
2480 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
2481 fprintf (ira_dump_file
, "Assigning %d to a%dr%d\n",
2482 best
, ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
2486 /* We spilled some allocnos to assign their hard registers to other
2487 allocnos. The spilled allocnos are now in array
2488 'sorted_allocnos'. There is still a possibility that some of the
2489 spilled allocnos can get hard registers. So let us try assign
2490 them hard registers again (just a reminder -- function
2491 'assign_hard_reg' assigns hard registers only if it is possible
2492 and profitable). We process the spilled allocnos with biggest
2493 benefit to get hard register first -- see function
2494 'allocno_cost_compare_func'. */
2495 qsort (sorted_allocnos
, n
, sizeof (ira_allocno_t
),
2496 allocno_cost_compare_func
);
2497 for (j
= 0; j
< n
; j
++)
2499 a
= sorted_allocnos
[j
];
2500 ALLOCNO_ASSIGNED_P (a
) = false;
2501 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2503 fprintf (ira_dump_file
, " ");
2504 ira_print_expanded_allocno (a
);
2505 fprintf (ira_dump_file
, " -- ");
2507 if (assign_hard_reg (a
, false))
2509 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2510 fprintf (ira_dump_file
, "assign hard reg %d\n",
2511 ALLOCNO_HARD_REGNO (a
));
2515 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2516 fprintf (ira_dump_file
, "assign memory\n");
2521 /* Sort allocnos according to their priorities. */
2523 allocno_priority_compare_func (const void *v1p
, const void *v2p
)
2525 ira_allocno_t a1
= *(const ira_allocno_t
*) v1p
;
2526 ira_allocno_t a2
= *(const ira_allocno_t
*) v2p
;
2529 pri1
= allocno_priorities
[ALLOCNO_NUM (a1
)];
2530 pri2
= allocno_priorities
[ALLOCNO_NUM (a2
)];
2532 return SORTGT (pri2
, pri1
);
2534 /* If regs are equally good, sort by allocnos, so that the results of
2535 qsort leave nothing to chance. */
2536 return ALLOCNO_NUM (a1
) - ALLOCNO_NUM (a2
);
2539 /* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
2540 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
2542 color_allocnos (void)
2548 setup_profitable_hard_regs ();
2549 if (flag_ira_algorithm
== IRA_ALGORITHM_PRIORITY
)
2552 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2554 a
= ira_allocnos
[i
];
2555 if (ALLOCNO_CLASS (a
) == NO_REGS
)
2557 ALLOCNO_HARD_REGNO (a
) = -1;
2558 ALLOCNO_ASSIGNED_P (a
) = true;
2559 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
);
2560 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) == NULL
);
2561 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2563 fprintf (ira_dump_file
, " Spill");
2564 ira_print_expanded_allocno (a
);
2565 fprintf (ira_dump_file
, "\n");
2569 sorted_allocnos
[n
++] = a
;
2573 setup_allocno_priorities (sorted_allocnos
, n
);
2574 qsort (sorted_allocnos
, n
, sizeof (ira_allocno_t
),
2575 allocno_priority_compare_func
);
2576 for (i
= 0; i
< n
; i
++)
2578 a
= sorted_allocnos
[i
];
2579 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2581 fprintf (ira_dump_file
, " ");
2582 ira_print_expanded_allocno (a
);
2583 fprintf (ira_dump_file
, " -- ");
2585 if (assign_hard_reg (a
, false))
2587 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2588 fprintf (ira_dump_file
, "assign hard reg %d\n",
2589 ALLOCNO_HARD_REGNO (a
));
2593 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2594 fprintf (ira_dump_file
, "assign memory\n");
2601 form_allocno_hard_regs_nodes_forest ();
2602 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
2603 print_hard_regs_forest (ira_dump_file
);
2604 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2606 a
= ira_allocnos
[i
];
2607 if (ALLOCNO_CLASS (a
) != NO_REGS
&& ! empty_profitable_hard_regs (a
))
2608 ALLOCNO_COLOR_DATA (a
)->in_graph_p
= true;
2611 ALLOCNO_HARD_REGNO (a
) = -1;
2612 ALLOCNO_ASSIGNED_P (a
) = true;
2613 /* We don't need updated costs anymore. */
2614 ira_free_allocno_updated_costs (a
);
2615 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2617 fprintf (ira_dump_file
, " Spill");
2618 ira_print_expanded_allocno (a
);
2619 fprintf (ira_dump_file
, "\n");
2623 /* Put the allocnos into the corresponding buckets. */
2624 colorable_allocno_bucket
= NULL
;
2625 uncolorable_allocno_bucket
= NULL
;
2626 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2628 a
= ira_allocnos
[i
];
2629 if (ALLOCNO_COLOR_DATA (a
)->in_graph_p
)
2630 put_allocno_into_bucket (a
);
2632 push_allocnos_to_stack ();
2633 pop_allocnos_from_stack ();
2634 finish_allocno_hard_regs_nodes_forest ();
2636 improve_allocation ();
2641 /* Output information about the loop given by its LOOP_TREE_NODE. */
2643 print_loop_title (ira_loop_tree_node_t loop_tree_node
)
2647 ira_loop_tree_node_t subloop_node
, dest_loop_node
;
2651 if (loop_tree_node
->parent
== NULL
)
2652 fprintf (ira_dump_file
,
2653 "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
2657 ira_assert (current_loops
!= NULL
&& loop_tree_node
->loop
!= NULL
);
2658 fprintf (ira_dump_file
,
2659 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
2660 loop_tree_node
->loop_num
, loop_tree_node
->parent
->loop_num
,
2661 loop_tree_node
->loop
->header
->index
,
2662 loop_depth (loop_tree_node
->loop
));
2664 for (subloop_node
= loop_tree_node
->children
;
2665 subloop_node
!= NULL
;
2666 subloop_node
= subloop_node
->next
)
2667 if (subloop_node
->bb
!= NULL
)
2669 fprintf (ira_dump_file
, " %d", subloop_node
->bb
->index
);
2670 FOR_EACH_EDGE (e
, ei
, subloop_node
->bb
->succs
)
2671 if (e
->dest
!= EXIT_BLOCK_PTR
2672 && ((dest_loop_node
= IRA_BB_NODE (e
->dest
)->parent
)
2674 fprintf (ira_dump_file
, "(->%d:l%d)",
2675 e
->dest
->index
, dest_loop_node
->loop_num
);
2677 fprintf (ira_dump_file
, "\n all:");
2678 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->all_allocnos
, 0, j
, bi
)
2679 fprintf (ira_dump_file
, " %dr%d", j
, ALLOCNO_REGNO (ira_allocnos
[j
]));
2680 fprintf (ira_dump_file
, "\n modified regnos:");
2681 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->modified_regnos
, 0, j
, bi
)
2682 fprintf (ira_dump_file
, " %d", j
);
2683 fprintf (ira_dump_file
, "\n border:");
2684 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->border_allocnos
, 0, j
, bi
)
2685 fprintf (ira_dump_file
, " %dr%d", j
, ALLOCNO_REGNO (ira_allocnos
[j
]));
2686 fprintf (ira_dump_file
, "\n Pressure:");
2687 for (j
= 0; (int) j
< ira_pressure_classes_num
; j
++)
2689 enum reg_class pclass
;
2691 pclass
= ira_pressure_classes
[j
];
2692 if (loop_tree_node
->reg_pressure
[pclass
] == 0)
2694 fprintf (ira_dump_file
, " %s=%d", reg_class_names
[pclass
],
2695 loop_tree_node
->reg_pressure
[pclass
]);
2697 fprintf (ira_dump_file
, "\n");
2700 /* Color the allocnos inside loop (in the extreme case it can be all
2701 of the function) given the corresponding LOOP_TREE_NODE. The
2702 function is called for each loop during top-down traverse of the
2705 color_pass (ira_loop_tree_node_t loop_tree_node
)
2707 int regno
, hard_regno
, index
= -1, n
;
2708 int cost
, exit_freq
, enter_freq
;
2711 enum machine_mode mode
;
2712 enum reg_class rclass
, aclass
, pclass
;
2713 ira_allocno_t a
, subloop_allocno
;
2714 ira_loop_tree_node_t subloop_node
;
2716 ira_assert (loop_tree_node
->bb
== NULL
);
2717 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2718 print_loop_title (loop_tree_node
);
2720 bitmap_copy (coloring_allocno_bitmap
, loop_tree_node
->all_allocnos
);
2721 bitmap_copy (consideration_allocno_bitmap
, coloring_allocno_bitmap
);
2723 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
2725 a
= ira_allocnos
[j
];
2727 if (! ALLOCNO_ASSIGNED_P (a
))
2729 bitmap_clear_bit (coloring_allocno_bitmap
, ALLOCNO_NUM (a
));
2732 = (allocno_color_data_t
) ira_allocate (sizeof (struct allocno_color_data
)
2734 memset (allocno_color_data
, 0, sizeof (struct allocno_color_data
) * n
);
2735 curr_allocno_process
= 0;
2737 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
2739 a
= ira_allocnos
[j
];
2740 ALLOCNO_ADD_DATA (a
) = allocno_color_data
+ n
;
2743 /* Color all mentioned allocnos including transparent ones. */
2745 /* Process caps. They are processed just once. */
2746 if (flag_ira_region
== IRA_REGION_MIXED
2747 || flag_ira_region
== IRA_REGION_ALL
)
2748 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->all_allocnos
, 0, j
, bi
)
2750 a
= ira_allocnos
[j
];
2751 if (ALLOCNO_CAP_MEMBER (a
) == NULL
)
2753 /* Remove from processing in the next loop. */
2754 bitmap_clear_bit (consideration_allocno_bitmap
, j
);
2755 rclass
= ALLOCNO_CLASS (a
);
2756 pclass
= ira_pressure_class_translate
[rclass
];
2757 if (flag_ira_region
== IRA_REGION_MIXED
2758 && (loop_tree_node
->reg_pressure
[pclass
]
2759 <= ira_class_hard_regs_num
[pclass
]))
2761 mode
= ALLOCNO_MODE (a
);
2762 hard_regno
= ALLOCNO_HARD_REGNO (a
);
2763 if (hard_regno
>= 0)
2765 index
= ira_class_hard_reg_index
[rclass
][hard_regno
];
2766 ira_assert (index
>= 0);
2768 regno
= ALLOCNO_REGNO (a
);
2769 subloop_allocno
= ALLOCNO_CAP_MEMBER (a
);
2770 subloop_node
= ALLOCNO_LOOP_TREE_NODE (subloop_allocno
);
2771 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno
));
2772 ALLOCNO_HARD_REGNO (subloop_allocno
) = hard_regno
;
2773 ALLOCNO_ASSIGNED_P (subloop_allocno
) = true;
2774 if (hard_regno
>= 0)
2775 update_copy_costs (subloop_allocno
, true);
2776 /* We don't need updated costs anymore: */
2777 ira_free_allocno_updated_costs (subloop_allocno
);
2780 /* Update costs of the corresponding allocnos (not caps) in the
2782 for (subloop_node
= loop_tree_node
->subloops
;
2783 subloop_node
!= NULL
;
2784 subloop_node
= subloop_node
->subloop_next
)
2786 ira_assert (subloop_node
->bb
== NULL
);
2787 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
2789 a
= ira_allocnos
[j
];
2790 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
2791 mode
= ALLOCNO_MODE (a
);
2792 rclass
= ALLOCNO_CLASS (a
);
2793 pclass
= ira_pressure_class_translate
[rclass
];
2794 hard_regno
= ALLOCNO_HARD_REGNO (a
);
2795 /* Use hard register class here. ??? */
2796 if (hard_regno
>= 0)
2798 index
= ira_class_hard_reg_index
[rclass
][hard_regno
];
2799 ira_assert (index
>= 0);
2801 regno
= ALLOCNO_REGNO (a
);
2802 /* ??? conflict costs */
2803 subloop_allocno
= subloop_node
->regno_allocno_map
[regno
];
2804 if (subloop_allocno
== NULL
2805 || ALLOCNO_CAP (subloop_allocno
) != NULL
)
2807 ira_assert (ALLOCNO_CLASS (subloop_allocno
) == rclass
);
2808 ira_assert (bitmap_bit_p (subloop_node
->all_allocnos
,
2809 ALLOCNO_NUM (subloop_allocno
)));
2810 if ((flag_ira_region
== IRA_REGION_MIXED
)
2811 && (loop_tree_node
->reg_pressure
[pclass
]
2812 <= ira_class_hard_regs_num
[pclass
]))
2814 if (! ALLOCNO_ASSIGNED_P (subloop_allocno
))
2816 ALLOCNO_HARD_REGNO (subloop_allocno
) = hard_regno
;
2817 ALLOCNO_ASSIGNED_P (subloop_allocno
) = true;
2818 if (hard_regno
>= 0)
2819 update_copy_costs (subloop_allocno
, true);
2820 /* We don't need updated costs anymore: */
2821 ira_free_allocno_updated_costs (subloop_allocno
);
2825 exit_freq
= ira_loop_edge_freq (subloop_node
, regno
, true);
2826 enter_freq
= ira_loop_edge_freq (subloop_node
, regno
, false);
2827 ira_assert (regno
< ira_reg_equiv_len
);
2828 if (ira_equiv_no_lvalue_p (regno
))
2830 if (! ALLOCNO_ASSIGNED_P (subloop_allocno
))
2832 ALLOCNO_HARD_REGNO (subloop_allocno
) = hard_regno
;
2833 ALLOCNO_ASSIGNED_P (subloop_allocno
) = true;
2834 if (hard_regno
>= 0)
2835 update_copy_costs (subloop_allocno
, true);
2836 /* We don't need updated costs anymore: */
2837 ira_free_allocno_updated_costs (subloop_allocno
);
2840 else if (hard_regno
< 0)
2842 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno
)
2843 -= ((ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
)
2844 + (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
));
2848 aclass
= ALLOCNO_CLASS (subloop_allocno
);
2849 ira_init_register_move_cost_if_necessary (mode
);
2850 cost
= (ira_register_move_cost
[mode
][rclass
][rclass
]
2851 * (exit_freq
+ enter_freq
));
2852 ira_allocate_and_set_or_copy_costs
2853 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
), aclass
,
2854 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno
),
2855 ALLOCNO_HARD_REG_COSTS (subloop_allocno
));
2856 ira_allocate_and_set_or_copy_costs
2857 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno
),
2858 aclass
, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno
));
2859 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
)[index
] -= cost
;
2860 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno
)[index
]
2862 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno
)
2863 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
)[index
])
2864 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno
)
2865 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
)[index
];
2866 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno
)
2867 += (ira_memory_move_cost
[mode
][rclass
][0] * enter_freq
2868 + ira_memory_move_cost
[mode
][rclass
][1] * exit_freq
);
2872 ira_free (allocno_color_data
);
2873 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, j
, bi
)
2875 a
= ira_allocnos
[j
];
2876 ALLOCNO_ADD_DATA (a
) = NULL
;
2880 /* Initialize the common data for coloring and calls functions to do
2881 Chaitin-Briggs and regional coloring. */
2885 coloring_allocno_bitmap
= ira_allocate_bitmap ();
2886 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
2887 fprintf (ira_dump_file
, "\n**** Allocnos coloring:\n\n");
2889 ira_traverse_loop_tree (false, ira_loop_tree_root
, color_pass
, NULL
);
2891 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
2892 ira_print_disposition (ira_dump_file
);
2894 ira_free_bitmap (coloring_allocno_bitmap
);
2899 /* Move spill/restore code, which are to be generated in ira-emit.c,
2900 to less frequent points (if it is profitable) by reassigning some
2901 allocnos (in loop with subloops containing in another loop) to
2902 memory which results in longer live-range where the corresponding
2903 pseudo-registers will be in memory. */
2905 move_spill_restore (void)
2907 int cost
, regno
, hard_regno
, hard_regno2
, index
;
2909 int enter_freq
, exit_freq
;
2910 enum machine_mode mode
;
2911 enum reg_class rclass
;
2912 ira_allocno_t a
, parent_allocno
, subloop_allocno
;
2913 ira_loop_tree_node_t parent
, loop_node
, subloop_node
;
2914 ira_allocno_iterator ai
;
2919 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
2920 fprintf (ira_dump_file
, "New iteration of spill/restore move\n");
2921 FOR_EACH_ALLOCNO (a
, ai
)
2923 regno
= ALLOCNO_REGNO (a
);
2924 loop_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2925 if (ALLOCNO_CAP_MEMBER (a
) != NULL
2926 || ALLOCNO_CAP (a
) != NULL
2927 || (hard_regno
= ALLOCNO_HARD_REGNO (a
)) < 0
2928 || loop_node
->children
== NULL
2929 /* don't do the optimization because it can create
2930 copies and the reload pass can spill the allocno set
2931 by copy although the allocno will not get memory
2933 || ira_equiv_no_lvalue_p (regno
)
2934 || !bitmap_bit_p (loop_node
->border_allocnos
, ALLOCNO_NUM (a
)))
2936 mode
= ALLOCNO_MODE (a
);
2937 rclass
= ALLOCNO_CLASS (a
);
2938 index
= ira_class_hard_reg_index
[rclass
][hard_regno
];
2939 ira_assert (index
>= 0);
2940 cost
= (ALLOCNO_MEMORY_COST (a
)
2941 - (ALLOCNO_HARD_REG_COSTS (a
) == NULL
2942 ? ALLOCNO_CLASS_COST (a
)
2943 : ALLOCNO_HARD_REG_COSTS (a
)[index
]));
2944 ira_init_register_move_cost_if_necessary (mode
);
2945 for (subloop_node
= loop_node
->subloops
;
2946 subloop_node
!= NULL
;
2947 subloop_node
= subloop_node
->subloop_next
)
2949 ira_assert (subloop_node
->bb
== NULL
);
2950 subloop_allocno
= subloop_node
->regno_allocno_map
[regno
];
2951 if (subloop_allocno
== NULL
)
2953 ira_assert (rclass
== ALLOCNO_CLASS (subloop_allocno
));
2954 /* We have accumulated cost. To get the real cost of
2955 allocno usage in the loop we should subtract costs of
2956 the subloop allocnos. */
2957 cost
-= (ALLOCNO_MEMORY_COST (subloop_allocno
)
2958 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno
) == NULL
2959 ? ALLOCNO_CLASS_COST (subloop_allocno
)
2960 : ALLOCNO_HARD_REG_COSTS (subloop_allocno
)[index
]));
2961 exit_freq
= ira_loop_edge_freq (subloop_node
, regno
, true);
2962 enter_freq
= ira_loop_edge_freq (subloop_node
, regno
, false);
2963 if ((hard_regno2
= ALLOCNO_HARD_REGNO (subloop_allocno
)) < 0)
2964 cost
-= (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
2965 + ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
);
2969 += (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
2970 + ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
);
2971 if (hard_regno2
!= hard_regno
)
2972 cost
-= (ira_register_move_cost
[mode
][rclass
][rclass
]
2973 * (exit_freq
+ enter_freq
));
2976 if ((parent
= loop_node
->parent
) != NULL
2977 && (parent_allocno
= parent
->regno_allocno_map
[regno
]) != NULL
)
2979 ira_assert (rclass
== ALLOCNO_CLASS (parent_allocno
));
2980 exit_freq
= ira_loop_edge_freq (loop_node
, regno
, true);
2981 enter_freq
= ira_loop_edge_freq (loop_node
, regno
, false);
2982 if ((hard_regno2
= ALLOCNO_HARD_REGNO (parent_allocno
)) < 0)
2983 cost
-= (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
2984 + ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
);
2988 += (ira_memory_move_cost
[mode
][rclass
][1] * exit_freq
2989 + ira_memory_move_cost
[mode
][rclass
][0] * enter_freq
);
2990 if (hard_regno2
!= hard_regno
)
2991 cost
-= (ira_register_move_cost
[mode
][rclass
][rclass
]
2992 * (exit_freq
+ enter_freq
));
2997 ALLOCNO_HARD_REGNO (a
) = -1;
2998 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3002 " Moving spill/restore for a%dr%d up from loop %d",
3003 ALLOCNO_NUM (a
), regno
, loop_node
->loop_num
);
3004 fprintf (ira_dump_file
, " - profit %d\n", -cost
);
3016 /* Update current hard reg costs and current conflict hard reg costs
3017 for allocno A. It is done by processing its copies containing
3018 other allocnos already assigned. */
3020 update_curr_costs (ira_allocno_t a
)
3022 int i
, hard_regno
, cost
;
3023 enum machine_mode mode
;
3024 enum reg_class aclass
, rclass
;
3025 ira_allocno_t another_a
;
3026 ira_copy_t cp
, next_cp
;
3028 ira_free_allocno_updated_costs (a
);
3029 ira_assert (! ALLOCNO_ASSIGNED_P (a
));
3030 aclass
= ALLOCNO_CLASS (a
);
3031 if (aclass
== NO_REGS
)
3033 mode
= ALLOCNO_MODE (a
);
3034 ira_init_register_move_cost_if_necessary (mode
);
3035 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
3039 next_cp
= cp
->next_first_allocno_copy
;
3040 another_a
= cp
->second
;
3042 else if (cp
->second
== a
)
3044 next_cp
= cp
->next_second_allocno_copy
;
3045 another_a
= cp
->first
;
3049 if (! ira_reg_classes_intersect_p
[aclass
][ALLOCNO_CLASS (another_a
)]
3050 || ! ALLOCNO_ASSIGNED_P (another_a
)
3051 || (hard_regno
= ALLOCNO_HARD_REGNO (another_a
)) < 0)
3053 rclass
= REGNO_REG_CLASS (hard_regno
);
3054 i
= ira_class_hard_reg_index
[aclass
][hard_regno
];
3057 cost
= (cp
->first
== a
3058 ? ira_register_move_cost
[mode
][rclass
][aclass
]
3059 : ira_register_move_cost
[mode
][aclass
][rclass
]);
3060 ira_allocate_and_set_or_copy_costs
3061 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a
), aclass
, ALLOCNO_CLASS_COST (a
),
3062 ALLOCNO_HARD_REG_COSTS (a
));
3063 ira_allocate_and_set_or_copy_costs
3064 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
),
3065 aclass
, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a
));
3066 ALLOCNO_UPDATED_HARD_REG_COSTS (a
)[i
] -= cp
->freq
* cost
;
3067 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
)[i
] -= cp
->freq
* cost
;
3071 /* Try to assign hard registers to the unassigned allocnos and
3072 allocnos conflicting with them or conflicting with allocnos whose
3073 regno >= START_REGNO. The function is called after ira_flattening,
3074 so more allocnos (including ones created in ira-emit.c) will have a
3075 chance to get a hard register. We use simple assignment algorithm
3076 based on priorities. */
3078 ira_reassign_conflict_allocnos (int start_regno
)
3080 int i
, allocnos_to_color_num
;
3082 enum reg_class aclass
;
3083 bitmap allocnos_to_color
;
3084 ira_allocno_iterator ai
;
3086 allocnos_to_color
= ira_allocate_bitmap ();
3087 allocnos_to_color_num
= 0;
3088 FOR_EACH_ALLOCNO (a
, ai
)
3090 int n
= ALLOCNO_NUM_OBJECTS (a
);
3092 if (! ALLOCNO_ASSIGNED_P (a
)
3093 && ! bitmap_bit_p (allocnos_to_color
, ALLOCNO_NUM (a
)))
3095 if (ALLOCNO_CLASS (a
) != NO_REGS
)
3096 sorted_allocnos
[allocnos_to_color_num
++] = a
;
3099 ALLOCNO_ASSIGNED_P (a
) = true;
3100 ALLOCNO_HARD_REGNO (a
) = -1;
3101 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
);
3102 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) == NULL
);
3104 bitmap_set_bit (allocnos_to_color
, ALLOCNO_NUM (a
));
3106 if (ALLOCNO_REGNO (a
) < start_regno
3107 || (aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
)
3109 for (i
= 0; i
< n
; i
++)
3111 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3112 ira_object_t conflict_obj
;
3113 ira_object_conflict_iterator oci
;
3115 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
3117 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
3119 ira_assert (ira_reg_classes_intersect_p
3120 [aclass
][ALLOCNO_CLASS (conflict_a
)]);
3121 if (!bitmap_set_bit (allocnos_to_color
, ALLOCNO_NUM (conflict_a
)))
3123 sorted_allocnos
[allocnos_to_color_num
++] = conflict_a
;
3127 ira_free_bitmap (allocnos_to_color
);
3128 if (allocnos_to_color_num
> 1)
3130 setup_allocno_priorities (sorted_allocnos
, allocnos_to_color_num
);
3131 qsort (sorted_allocnos
, allocnos_to_color_num
, sizeof (ira_allocno_t
),
3132 allocno_priority_compare_func
);
3134 for (i
= 0; i
< allocnos_to_color_num
; i
++)
3136 a
= sorted_allocnos
[i
];
3137 ALLOCNO_ASSIGNED_P (a
) = false;
3138 update_curr_costs (a
);
3140 for (i
= 0; i
< allocnos_to_color_num
; i
++)
3142 a
= sorted_allocnos
[i
];
3143 if (assign_hard_reg (a
, true))
3145 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3148 " Secondary allocation: assign hard reg %d to reg %d\n",
3149 ALLOCNO_HARD_REGNO (a
), ALLOCNO_REGNO (a
));
3156 /* This page contains functions used to find conflicts using allocno
3159 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
3160 used to find a conflict for new allocnos or allocnos with the
3161 different allocno classes. */
3163 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1
, ira_allocno_t a2
)
3167 int n1
= ALLOCNO_NUM_OBJECTS (a1
);
3168 int n2
= ALLOCNO_NUM_OBJECTS (a2
);
3172 reg1
= regno_reg_rtx
[ALLOCNO_REGNO (a1
)];
3173 reg2
= regno_reg_rtx
[ALLOCNO_REGNO (a2
)];
3174 if (reg1
!= NULL
&& reg2
!= NULL
3175 && ORIGINAL_REGNO (reg1
) == ORIGINAL_REGNO (reg2
))
3178 for (i
= 0; i
< n1
; i
++)
3180 ira_object_t c1
= ALLOCNO_OBJECT (a1
, i
);
3182 for (j
= 0; j
< n2
; j
++)
3184 ira_object_t c2
= ALLOCNO_OBJECT (a2
, j
);
3186 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1
),
3187 OBJECT_LIVE_RANGES (c2
)))
#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
   intersect.  This should be used when there is only one region.
   Currently this is used during reload.  */
static bool
conflict_by_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
	      && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Reg info calculated by dataflow infrastructure can be different
     from one calculated by regclass.  */
  if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
      || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
    return false;
  return allocnos_conflict_by_live_ranges_p (a1, a2);
}

#endif
3218 /* This page contains code to coalesce memory stack slots used by
3219 spilled allocnos. This results in smaller stack frame, better data
3220 locality, and in smaller code for some architectures like
3221 x86/x86_64 where insn size depends on address displacement value.
3222 On the other hand, it can worsen insn scheduling after the RA but
3223 in practice it is less important than smaller stack frames. */
3225 /* TRUE if we coalesced some allocnos. In other words, if we got
3226 loops formed by members first_coalesced_allocno and
3227 next_coalesced_allocno containing more one allocno. */
3228 static bool allocno_coalesced_p
;
3230 /* Bitmap used to prevent a repeated allocno processing because of
3232 static bitmap processed_coalesced_allocno_bitmap
;
3235 typedef struct coalesce_data
*coalesce_data_t
;
3237 /* To decrease footprint of ira_allocno structure we store all data
3238 needed only for coalescing in the following structure. */
3239 struct coalesce_data
3241 /* Coalesced allocnos form a cyclic list. One allocno given by
3242 FIRST represents all coalesced allocnos. The
3243 list is chained by NEXT. */
3244 ira_allocno_t first
;
3249 /* Container for storing allocno data concerning coalescing. */
3250 static coalesce_data_t allocno_coalesce_data
;
3252 /* Macro to access the data concerning coalescing. */
3253 #define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
3255 /* The function is used to sort allocnos according to their execution
3258 copy_freq_compare_func (const void *v1p
, const void *v2p
)
3260 ira_copy_t cp1
= *(const ira_copy_t
*) v1p
, cp2
= *(const ira_copy_t
*) v2p
;
3268 /* If freqencies are equal, sort by copies, so that the results of
3269 qsort leave nothing to chance. */
3270 return cp1
->num
- cp2
->num
;
3273 /* Merge two sets of coalesced allocnos given correspondingly by
3274 allocnos A1 and A2 (more accurately merging A2 set into A1
3277 merge_allocnos (ira_allocno_t a1
, ira_allocno_t a2
)
3279 ira_allocno_t a
, first
, last
, next
;
3281 first
= ALLOCNO_COALESCE_DATA (a1
)->first
;
3282 a
= ALLOCNO_COALESCE_DATA (a2
)->first
;
3285 for (last
= a2
, a
= ALLOCNO_COALESCE_DATA (a2
)->next
;;
3286 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3288 ALLOCNO_COALESCE_DATA (a
)->first
= first
;
3293 next
= allocno_coalesce_data
[ALLOCNO_NUM (first
)].next
;
3294 allocno_coalesce_data
[ALLOCNO_NUM (first
)].next
= a2
;
3295 allocno_coalesce_data
[ALLOCNO_NUM (last
)].next
= next
;
3298 /* Return TRUE if there are conflicting allocnos from two sets of
3299 coalesced allocnos given correspondingly by allocnos A1 and A2. We
3300 use live ranges to find conflicts because conflicts are represented
3301 only for allocnos of the same allocno class and during the reload
3302 pass we coalesce allocnos for sharing stack memory slots. */
3304 coalesced_allocno_conflict_p (ira_allocno_t a1
, ira_allocno_t a2
)
3306 ira_allocno_t a
, conflict_a
;
3308 if (allocno_coalesced_p
)
3310 bitmap_clear (processed_coalesced_allocno_bitmap
);
3311 for (a
= ALLOCNO_COALESCE_DATA (a1
)->next
;;
3312 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3314 bitmap_set_bit (processed_coalesced_allocno_bitmap
, ALLOCNO_NUM (a
));
3319 for (a
= ALLOCNO_COALESCE_DATA (a2
)->next
;;
3320 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3322 for (conflict_a
= ALLOCNO_COALESCE_DATA (a1
)->next
;;
3323 conflict_a
= ALLOCNO_COALESCE_DATA (conflict_a
)->next
)
3325 if (allocnos_conflict_by_live_ranges_p (a
, conflict_a
))
3327 if (conflict_a
== a1
)
3336 /* The major function for aggressive allocno coalescing. We coalesce
3337 only spilled allocnos. If some allocnos have been coalesced, we
3338 set up flag allocno_coalesced_p. */
3340 coalesce_allocnos (void)
3343 ira_copy_t cp
, next_cp
, *sorted_copies
;
3345 int i
, n
, cp_num
, regno
;
3348 sorted_copies
= (ira_copy_t
*) ira_allocate (ira_copies_num
3349 * sizeof (ira_copy_t
));
3351 /* Collect copies. */
3352 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, j
, bi
)
3354 a
= ira_allocnos
[j
];
3355 regno
= ALLOCNO_REGNO (a
);
3356 if (! ALLOCNO_ASSIGNED_P (a
) || ALLOCNO_HARD_REGNO (a
) >= 0
3357 || ira_equiv_no_lvalue_p (regno
))
3359 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
3363 next_cp
= cp
->next_first_allocno_copy
;
3364 regno
= ALLOCNO_REGNO (cp
->second
);
3365 /* For priority coloring we coalesce allocnos only with
3366 the same allocno class not with intersected allocno
3367 classes as it were possible. It is done for
3369 if ((cp
->insn
!= NULL
|| cp
->constraint_p
)
3370 && ALLOCNO_ASSIGNED_P (cp
->second
)
3371 && ALLOCNO_HARD_REGNO (cp
->second
) < 0
3372 && ! ira_equiv_no_lvalue_p (regno
))
3373 sorted_copies
[cp_num
++] = cp
;
3375 else if (cp
->second
== a
)
3376 next_cp
= cp
->next_second_allocno_copy
;
3381 qsort (sorted_copies
, cp_num
, sizeof (ira_copy_t
), copy_freq_compare_func
);
3382 /* Coalesced copies, most frequently executed first. */
3383 for (; cp_num
!= 0;)
3385 for (i
= 0; i
< cp_num
; i
++)
3387 cp
= sorted_copies
[i
];
3388 if (! coalesced_allocno_conflict_p (cp
->first
, cp
->second
))
3390 allocno_coalesced_p
= true;
3391 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3394 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
3395 cp
->num
, ALLOCNO_NUM (cp
->first
), ALLOCNO_REGNO (cp
->first
),
3396 ALLOCNO_NUM (cp
->second
), ALLOCNO_REGNO (cp
->second
),
3398 merge_allocnos (cp
->first
, cp
->second
);
3403 /* Collect the rest of copies. */
3404 for (n
= 0; i
< cp_num
; i
++)
3406 cp
= sorted_copies
[i
];
3407 if (allocno_coalesce_data
[ALLOCNO_NUM (cp
->first
)].first
3408 != allocno_coalesce_data
[ALLOCNO_NUM (cp
->second
)].first
)
3409 sorted_copies
[n
++] = cp
;
3413 ira_free (sorted_copies
);
/* Usage cost and order number of coalesced allocno set to which
   given pseudo register belongs to.  */
static int *regno_coalesced_allocno_cost;
static int *regno_coalesced_allocno_num;
3421 /* Sort pseudos according frequencies of coalesced allocno sets they
3422 belong to (putting most frequently ones first), and according to
3423 coalesced allocno set order numbers. */
3425 coalesced_pseudo_reg_freq_compare (const void *v1p
, const void *v2p
)
3427 const int regno1
= *(const int *) v1p
;
3428 const int regno2
= *(const int *) v2p
;
3431 if ((diff
= (regno_coalesced_allocno_cost
[regno2
]
3432 - regno_coalesced_allocno_cost
[regno1
])) != 0)
3434 if ((diff
= (regno_coalesced_allocno_num
[regno1
]
3435 - regno_coalesced_allocno_num
[regno2
])) != 0)
3437 return regno1
- regno2
;
/* Widest width in which each pseudo reg is referred to (via subreg).
   It is used for sorting pseudo registers.  */
static unsigned int *regno_max_ref_width;

/* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1.  */
#ifdef STACK_GROWS_DOWNWARD
# undef STACK_GROWS_DOWNWARD
# define STACK_GROWS_DOWNWARD 1
#else
# define STACK_GROWS_DOWNWARD 0
#endif
3452 /* Sort pseudos according their slot numbers (putting ones with
3453 smaller numbers first, or last when the frame pointer is not
3456 coalesced_pseudo_reg_slot_compare (const void *v1p
, const void *v2p
)
3458 const int regno1
= *(const int *) v1p
;
3459 const int regno2
= *(const int *) v2p
;
3460 ira_allocno_t a1
= ira_regno_allocno_map
[regno1
];
3461 ira_allocno_t a2
= ira_regno_allocno_map
[regno2
];
3462 int diff
, slot_num1
, slot_num2
;
3463 int total_size1
, total_size2
;
3465 if (a1
== NULL
|| ALLOCNO_HARD_REGNO (a1
) >= 0)
3467 if (a2
== NULL
|| ALLOCNO_HARD_REGNO (a2
) >= 0)
3468 return regno1
- regno2
;
3471 else if (a2
== NULL
|| ALLOCNO_HARD_REGNO (a2
) >= 0)
3473 slot_num1
= -ALLOCNO_HARD_REGNO (a1
);
3474 slot_num2
= -ALLOCNO_HARD_REGNO (a2
);
3475 if ((diff
= slot_num1
- slot_num2
) != 0)
3476 return (frame_pointer_needed
3477 || !FRAME_GROWS_DOWNWARD
== STACK_GROWS_DOWNWARD
? diff
: -diff
);
3478 total_size1
= MAX (PSEUDO_REGNO_BYTES (regno1
),
3479 regno_max_ref_width
[regno1
]);
3480 total_size2
= MAX (PSEUDO_REGNO_BYTES (regno2
),
3481 regno_max_ref_width
[regno2
]);
3482 if ((diff
= total_size2
- total_size1
) != 0)
3484 return regno1
- regno2
;
3487 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
3488 for coalesced allocno sets containing allocnos with their regnos
3489 given in array PSEUDO_REGNOS of length N. */
3491 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos
, int n
)
3493 int i
, num
, regno
, cost
;
3494 ira_allocno_t allocno
, a
;
3496 for (num
= i
= 0; i
< n
; i
++)
3498 regno
= pseudo_regnos
[i
];
3499 allocno
= ira_regno_allocno_map
[regno
];
3500 if (allocno
== NULL
)
3502 regno_coalesced_allocno_cost
[regno
] = 0;
3503 regno_coalesced_allocno_num
[regno
] = ++num
;
3506 if (ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
)
3509 for (cost
= 0, a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3510 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3512 cost
+= ALLOCNO_FREQ (a
);
3516 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3517 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3519 regno_coalesced_allocno_num
[ALLOCNO_REGNO (a
)] = num
;
3520 regno_coalesced_allocno_cost
[ALLOCNO_REGNO (a
)] = cost
;
3527 /* Collect spilled allocnos representing coalesced allocno sets (the
3528 first coalesced allocno). The collected allocnos are returned
3529 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3530 number of the collected allocnos. The allocnos are given by their
3531 regnos in array PSEUDO_REGNOS of length N. */
3533 collect_spilled_coalesced_allocnos (int *pseudo_regnos
, int n
,
3534 ira_allocno_t
*spilled_coalesced_allocnos
)
3537 ira_allocno_t allocno
;
3539 for (num
= i
= 0; i
< n
; i
++)
3541 regno
= pseudo_regnos
[i
];
3542 allocno
= ira_regno_allocno_map
[regno
];
3543 if (allocno
== NULL
|| ALLOCNO_HARD_REGNO (allocno
) >= 0
3544 || ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
)
3546 spilled_coalesced_allocnos
[num
++] = allocno
;
3551 /* Array of live ranges of size IRA_ALLOCNOS_NUM. Live range for
3552 given slot contains live ranges of coalesced allocnos assigned to
3554 static live_range_t
*slot_coalesced_allocnos_live_ranges
;
3556 /* Return TRUE if coalesced allocnos represented by ALLOCNO has live
3557 ranges intersected with live ranges of coalesced allocnos assigned
3558 to slot with number N. */
3560 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno
, int n
)
3564 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3565 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3568 int nr
= ALLOCNO_NUM_OBJECTS (a
);
3570 for (i
= 0; i
< nr
; i
++)
3572 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3574 if (ira_live_ranges_intersect_p
3575 (slot_coalesced_allocnos_live_ranges
[n
],
3576 OBJECT_LIVE_RANGES (obj
)))
3585 /* Update live ranges of slot to which coalesced allocnos represented
3586 by ALLOCNO were assigned. */
3588 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno
)
3594 n
= ALLOCNO_COALESCE_DATA (allocno
)->temp
;
3595 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3596 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3598 int nr
= ALLOCNO_NUM_OBJECTS (a
);
3599 for (i
= 0; i
< nr
; i
++)
3601 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3603 r
= ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj
));
3604 slot_coalesced_allocnos_live_ranges
[n
]
3605 = ira_merge_live_ranges
3606 (slot_coalesced_allocnos_live_ranges
[n
], r
);
3613 /* We have coalesced allocnos involving in copies. Coalesce allocnos
3614 further in order to share the same memory stack slot. Allocnos
3615 representing sets of allocnos coalesced before the call are given
3616 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
3617 some allocnos were coalesced in the function. */
3619 coalesce_spill_slots (ira_allocno_t
*spilled_coalesced_allocnos
, int num
)
3621 int i
, j
, n
, last_coalesced_allocno_num
;
3622 ira_allocno_t allocno
, a
;
3623 bool merged_p
= false;
3624 bitmap set_jump_crosses
= regstat_get_setjmp_crosses ();
3626 slot_coalesced_allocnos_live_ranges
3627 = (live_range_t
*) ira_allocate (sizeof (live_range_t
) * ira_allocnos_num
);
3628 memset (slot_coalesced_allocnos_live_ranges
, 0,
3629 sizeof (live_range_t
) * ira_allocnos_num
);
3630 last_coalesced_allocno_num
= 0;
3631 /* Coalesce non-conflicting spilled allocnos preferring most
3633 for (i
= 0; i
< num
; i
++)
3635 allocno
= spilled_coalesced_allocnos
[i
];
3636 if (ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
3637 || bitmap_bit_p (set_jump_crosses
, ALLOCNO_REGNO (allocno
))
3638 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno
)))
3640 for (j
= 0; j
< i
; j
++)
3642 a
= spilled_coalesced_allocnos
[j
];
3643 n
= ALLOCNO_COALESCE_DATA (a
)->temp
;
3644 if (ALLOCNO_COALESCE_DATA (a
)->first
== a
3645 && ! bitmap_bit_p (set_jump_crosses
, ALLOCNO_REGNO (a
))
3646 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a
))
3647 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno
, n
))
3652 /* No coalescing: set up number for coalesced allocnos
3653 represented by ALLOCNO. */
3654 ALLOCNO_COALESCE_DATA (allocno
)->temp
= last_coalesced_allocno_num
++;
3655 setup_slot_coalesced_allocno_live_ranges (allocno
);
3659 allocno_coalesced_p
= true;
3661 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3662 fprintf (ira_dump_file
,
3663 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
3664 ALLOCNO_NUM (allocno
), ALLOCNO_REGNO (allocno
),
3665 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
3666 ALLOCNO_COALESCE_DATA (allocno
)->temp
3667 = ALLOCNO_COALESCE_DATA (a
)->temp
;
3668 setup_slot_coalesced_allocno_live_ranges (allocno
);
3669 merge_allocnos (a
, allocno
);
3670 ira_assert (ALLOCNO_COALESCE_DATA (a
)->first
== a
);
3673 for (i
= 0; i
< ira_allocnos_num
; i
++)
3674 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges
[i
]);
3675 ira_free (slot_coalesced_allocnos_live_ranges
);
3679 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
3680 subsequent assigning stack slots to them in the reload pass. To do
3681 this we coalesce spilled allocnos first to decrease the number of
3682 memory-memory move insns. This function is called by the
3685 ira_sort_regnos_for_alter_reg (int *pseudo_regnos
, int n
,
3686 unsigned int *reg_max_ref_width
)
3688 int max_regno
= max_reg_num ();
3689 int i
, regno
, num
, slot_num
;
3690 ira_allocno_t allocno
, a
;
3691 ira_allocno_iterator ai
;
3692 ira_allocno_t
*spilled_coalesced_allocnos
;
3694 /* Set up allocnos can be coalesced. */
3695 coloring_allocno_bitmap
= ira_allocate_bitmap ();
3696 for (i
= 0; i
< n
; i
++)
3698 regno
= pseudo_regnos
[i
];
3699 allocno
= ira_regno_allocno_map
[regno
];
3700 if (allocno
!= NULL
)
3701 bitmap_set_bit (coloring_allocno_bitmap
, ALLOCNO_NUM (allocno
));
3703 allocno_coalesced_p
= false;
3704 processed_coalesced_allocno_bitmap
= ira_allocate_bitmap ();
3705 allocno_coalesce_data
3706 = (coalesce_data_t
) ira_allocate (sizeof (struct coalesce_data
)
3707 * ira_allocnos_num
);
3708 /* Initialize coalesce data for allocnos. */
3709 FOR_EACH_ALLOCNO (a
, ai
)
3711 ALLOCNO_ADD_DATA (a
) = allocno_coalesce_data
+ ALLOCNO_NUM (a
);
3712 ALLOCNO_COALESCE_DATA (a
)->first
= a
;
3713 ALLOCNO_COALESCE_DATA (a
)->next
= a
;
3715 coalesce_allocnos ();
3716 ira_free_bitmap (coloring_allocno_bitmap
);
3717 regno_coalesced_allocno_cost
3718 = (int *) ira_allocate (max_regno
* sizeof (int));
3719 regno_coalesced_allocno_num
3720 = (int *) ira_allocate (max_regno
* sizeof (int));
3721 memset (regno_coalesced_allocno_num
, 0, max_regno
* sizeof (int));
3722 setup_coalesced_allocno_costs_and_nums (pseudo_regnos
, n
);
3723 /* Sort regnos according frequencies of the corresponding coalesced
3725 qsort (pseudo_regnos
, n
, sizeof (int), coalesced_pseudo_reg_freq_compare
);
3726 spilled_coalesced_allocnos
3727 = (ira_allocno_t
*) ira_allocate (ira_allocnos_num
3728 * sizeof (ira_allocno_t
));
3729 /* Collect allocnos representing the spilled coalesced allocno
3731 num
= collect_spilled_coalesced_allocnos (pseudo_regnos
, n
,
3732 spilled_coalesced_allocnos
);
3733 if (flag_ira_share_spill_slots
3734 && coalesce_spill_slots (spilled_coalesced_allocnos
, num
))
3736 setup_coalesced_allocno_costs_and_nums (pseudo_regnos
, n
);
3737 qsort (pseudo_regnos
, n
, sizeof (int),
3738 coalesced_pseudo_reg_freq_compare
);
3739 num
= collect_spilled_coalesced_allocnos (pseudo_regnos
, n
,
3740 spilled_coalesced_allocnos
);
3742 ira_free_bitmap (processed_coalesced_allocno_bitmap
);
3743 allocno_coalesced_p
= false;
3744 /* Assign stack slot numbers to spilled allocno sets, use smaller
3745 numbers for most frequently used coalesced allocnos. -1 is
3746 reserved for dynamic search of stack slots for pseudos spilled by
3749 for (i
= 0; i
< num
; i
++)
3751 allocno
= spilled_coalesced_allocnos
[i
];
3752 if (ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
3753 || ALLOCNO_HARD_REGNO (allocno
) >= 0
3754 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno
)))
3756 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3757 fprintf (ira_dump_file
, " Slot %d (freq,size):", slot_num
);
3759 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3760 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3762 ira_assert (ALLOCNO_HARD_REGNO (a
) < 0);
3763 ALLOCNO_HARD_REGNO (a
) = -slot_num
;
3764 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3765 fprintf (ira_dump_file
, " a%dr%d(%d,%d)",
3766 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
), ALLOCNO_FREQ (a
),
3767 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a
)),
3768 reg_max_ref_width
[ALLOCNO_REGNO (a
)]));
3773 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3774 fprintf (ira_dump_file
, "\n");
3776 ira_spilled_reg_stack_slots_num
= slot_num
- 1;
3777 ira_free (spilled_coalesced_allocnos
);
3778 /* Sort regnos according the slot numbers. */
3779 regno_max_ref_width
= reg_max_ref_width
;
3780 qsort (pseudo_regnos
, n
, sizeof (int), coalesced_pseudo_reg_slot_compare
);
3781 FOR_EACH_ALLOCNO (a
, ai
)
3782 ALLOCNO_ADD_DATA (a
) = NULL
;
3783 ira_free (allocno_coalesce_data
);
3784 ira_free (regno_coalesced_allocno_num
);
3785 ira_free (regno_coalesced_allocno_cost
);
3790 /* This page contains code used by the reload pass to improve the
3793 /* The function is called from reload to mark changes in the
3794 allocation of REGNO made by the reload. Remember that reg_renumber
3795 reflects the change result. */
3797 ira_mark_allocation_change (int regno
)
3799 ira_allocno_t a
= ira_regno_allocno_map
[regno
];
3800 int old_hard_regno
, hard_regno
, cost
;
3801 enum reg_class aclass
= ALLOCNO_CLASS (a
);
3803 ira_assert (a
!= NULL
);
3804 hard_regno
= reg_renumber
[regno
];
3805 if ((old_hard_regno
= ALLOCNO_HARD_REGNO (a
)) == hard_regno
)
3807 if (old_hard_regno
< 0)
3808 cost
= -ALLOCNO_MEMORY_COST (a
);
3811 ira_assert (ira_class_hard_reg_index
[aclass
][old_hard_regno
] >= 0);
3812 cost
= -(ALLOCNO_HARD_REG_COSTS (a
) == NULL
3813 ? ALLOCNO_CLASS_COST (a
)
3814 : ALLOCNO_HARD_REG_COSTS (a
)
3815 [ira_class_hard_reg_index
[aclass
][old_hard_regno
]]);
3816 update_copy_costs (a
, false);
3818 ira_overall_cost
-= cost
;
3819 ALLOCNO_HARD_REGNO (a
) = hard_regno
;
3822 ALLOCNO_HARD_REGNO (a
) = -1;
3823 cost
+= ALLOCNO_MEMORY_COST (a
);
3825 else if (ira_class_hard_reg_index
[aclass
][hard_regno
] >= 0)
3827 cost
+= (ALLOCNO_HARD_REG_COSTS (a
) == NULL
3828 ? ALLOCNO_CLASS_COST (a
)
3829 : ALLOCNO_HARD_REG_COSTS (a
)
3830 [ira_class_hard_reg_index
[aclass
][hard_regno
]]);
3831 update_copy_costs (a
, true);
3834 /* Reload changed class of the allocno. */
3836 ira_overall_cost
+= cost
;
3839 /* This function is called when reload deletes memory-memory move. In
3840 this case we marks that the allocation of the corresponding
3841 allocnos should be not changed in future. Otherwise we risk to get
3844 ira_mark_memory_move_deletion (int dst_regno
, int src_regno
)
3846 ira_allocno_t dst
= ira_regno_allocno_map
[dst_regno
];
3847 ira_allocno_t src
= ira_regno_allocno_map
[src_regno
];
3849 ira_assert (dst
!= NULL
&& src
!= NULL
3850 && ALLOCNO_HARD_REGNO (dst
) < 0
3851 && ALLOCNO_HARD_REGNO (src
) < 0);
3852 ALLOCNO_DONT_REASSIGN_P (dst
) = true;
3853 ALLOCNO_DONT_REASSIGN_P (src
) = true;
3856 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
3857 allocno A and return TRUE in the case of success. */
3859 allocno_reload_assign (ira_allocno_t a
, HARD_REG_SET forbidden_regs
)
3862 enum reg_class aclass
;
3863 int regno
= ALLOCNO_REGNO (a
);
3864 HARD_REG_SET saved
[2];
3867 n
= ALLOCNO_NUM_OBJECTS (a
);
3868 for (i
= 0; i
< n
; i
++)
3870 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3871 COPY_HARD_REG_SET (saved
[i
], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
3872 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
), forbidden_regs
);
3873 if (! flag_caller_saves
&& ALLOCNO_CALLS_CROSSED_NUM (a
) != 0)
3874 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
3877 ALLOCNO_ASSIGNED_P (a
) = false;
3878 aclass
= ALLOCNO_CLASS (a
);
3879 update_curr_costs (a
);
3880 assign_hard_reg (a
, true);
3881 hard_regno
= ALLOCNO_HARD_REGNO (a
);
3882 reg_renumber
[regno
] = hard_regno
;
3884 ALLOCNO_HARD_REGNO (a
) = -1;
3887 ira_assert (ira_class_hard_reg_index
[aclass
][hard_regno
] >= 0);
3889 -= (ALLOCNO_MEMORY_COST (a
)
3890 - (ALLOCNO_HARD_REG_COSTS (a
) == NULL
3891 ? ALLOCNO_CLASS_COST (a
)
3892 : ALLOCNO_HARD_REG_COSTS (a
)[ira_class_hard_reg_index
3893 [aclass
][hard_regno
]]));
3894 if (ALLOCNO_CALLS_CROSSED_NUM (a
) != 0
3895 && ira_hard_reg_set_intersection_p (hard_regno
, ALLOCNO_MODE (a
),
3898 ira_assert (flag_caller_saves
);
3899 caller_save_needed
= 1;
3903 /* If we found a hard register, modify the RTL for the pseudo
3904 register to show the hard register, and mark the pseudo register
3906 if (reg_renumber
[regno
] >= 0)
3908 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3909 fprintf (ira_dump_file
, ": reassign to %d\n", reg_renumber
[regno
]);
3910 SET_REGNO (regno_reg_rtx
[regno
], reg_renumber
[regno
]);
3911 mark_home_live (regno
);
3913 else if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3914 fprintf (ira_dump_file
, "\n");
3915 for (i
= 0; i
< n
; i
++)
3917 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3918 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
), saved
[i
]);
3920 return reg_renumber
[regno
] >= 0;
/* Sort pseudos according to their usage frequencies (putting most
   frequently used ones first).  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  int regno1 = *(const int *) v1p;
  int regno2 = *(const int *) v2p;
  int diff;

  if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
    return diff;
  /* Tie-break on regno so qsort's result is deterministic.  */
  return regno1 - regno2;
}
3937 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
3938 NUM of them) or spilled pseudos conflicting with pseudos in
3939 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
3940 allocation has been changed. The function doesn't use
3941 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
3942 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
3943 is called by the reload pass at the end of each reload
3946 ira_reassign_pseudos (int *spilled_pseudo_regs
, int num
,
3947 HARD_REG_SET bad_spill_regs
,
3948 HARD_REG_SET
*pseudo_forbidden_regs
,
3949 HARD_REG_SET
*pseudo_previous_regs
,
3955 HARD_REG_SET forbidden_regs
;
3956 bitmap temp
= BITMAP_ALLOC (NULL
);
3958 /* Add pseudos which conflict with pseudos already in
3959 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
3960 to allocating in two steps as some of the conflicts might have
3961 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
3962 for (i
= 0; i
< num
; i
++)
3963 bitmap_set_bit (temp
, spilled_pseudo_regs
[i
]);
3965 for (i
= 0, n
= num
; i
< n
; i
++)
3968 int regno
= spilled_pseudo_regs
[i
];
3969 bitmap_set_bit (temp
, regno
);
3971 a
= ira_regno_allocno_map
[regno
];
3972 nr
= ALLOCNO_NUM_OBJECTS (a
);
3973 for (j
= 0; j
< nr
; j
++)
3975 ira_object_t conflict_obj
;
3976 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
3977 ira_object_conflict_iterator oci
;
3979 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
3981 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
3982 if (ALLOCNO_HARD_REGNO (conflict_a
) < 0
3983 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a
)
3984 && bitmap_set_bit (temp
, ALLOCNO_REGNO (conflict_a
)))
3986 spilled_pseudo_regs
[num
++] = ALLOCNO_REGNO (conflict_a
);
3987 /* ?!? This seems wrong. */
3988 bitmap_set_bit (consideration_allocno_bitmap
,
3989 ALLOCNO_NUM (conflict_a
));
3996 qsort (spilled_pseudo_regs
, num
, sizeof (int), pseudo_reg_compare
);
3998 /* Try to assign hard registers to pseudos from
3999 SPILLED_PSEUDO_REGS. */
4000 for (i
= 0; i
< num
; i
++)
4002 regno
= spilled_pseudo_regs
[i
];
4003 COPY_HARD_REG_SET (forbidden_regs
, bad_spill_regs
);
4004 IOR_HARD_REG_SET (forbidden_regs
, pseudo_forbidden_regs
[regno
]);
4005 IOR_HARD_REG_SET (forbidden_regs
, pseudo_previous_regs
[regno
]);
4006 gcc_assert (reg_renumber
[regno
] < 0);
4007 a
= ira_regno_allocno_map
[regno
];
4008 ira_mark_allocation_change (regno
);
4009 ira_assert (reg_renumber
[regno
] < 0);
4010 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4011 fprintf (ira_dump_file
,
4012 " Try Assign %d(a%d), cost=%d", regno
, ALLOCNO_NUM (a
),
4013 ALLOCNO_MEMORY_COST (a
)
4014 - ALLOCNO_CLASS_COST (a
));
4015 allocno_reload_assign (a
, forbidden_regs
);
4016 if (reg_renumber
[regno
] >= 0)
4018 CLEAR_REGNO_REG_SET (spilled
, regno
);
4026 /* The function is called by reload and returns already allocated
4027 stack slot (if any) for REGNO with given INHERENT_SIZE and
4028 TOTAL_SIZE. In the case of failure to find a slot which can be
4029 used for REGNO, the function returns NULL. */
4031 ira_reuse_stack_slot (int regno
, unsigned int inherent_size
,
4032 unsigned int total_size
)
4035 int slot_num
, best_slot_num
;
4036 int cost
, best_cost
;
4037 ira_copy_t cp
, next_cp
;
4038 ira_allocno_t another_allocno
, allocno
= ira_regno_allocno_map
[regno
];
4041 struct ira_spilled_reg_stack_slot
*slot
= NULL
;
4043 ira_assert (inherent_size
== PSEUDO_REGNO_BYTES (regno
)
4044 && inherent_size
<= total_size
4045 && ALLOCNO_HARD_REGNO (allocno
) < 0);
4046 if (! flag_ira_share_spill_slots
)
4048 slot_num
= -ALLOCNO_HARD_REGNO (allocno
) - 2;
4051 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4056 best_cost
= best_slot_num
= -1;
4058 /* It means that the pseudo was spilled in the reload pass, try
4061 slot_num
< ira_spilled_reg_stack_slots_num
;
4064 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4065 if (slot
->mem
== NULL_RTX
)
4067 if (slot
->width
< total_size
4068 || GET_MODE_SIZE (GET_MODE (slot
->mem
)) < inherent_size
)
4071 EXECUTE_IF_SET_IN_BITMAP (&slot
->spilled_regs
,
4072 FIRST_PSEUDO_REGISTER
, i
, bi
)
4074 another_allocno
= ira_regno_allocno_map
[i
];
4075 if (allocnos_conflict_by_live_ranges_p (allocno
,
4079 for (cost
= 0, cp
= ALLOCNO_COPIES (allocno
);
4083 if (cp
->first
== allocno
)
4085 next_cp
= cp
->next_first_allocno_copy
;
4086 another_allocno
= cp
->second
;
4088 else if (cp
->second
== allocno
)
4090 next_cp
= cp
->next_second_allocno_copy
;
4091 another_allocno
= cp
->first
;
4095 if (cp
->insn
== NULL_RTX
)
4097 if (bitmap_bit_p (&slot
->spilled_regs
,
4098 ALLOCNO_REGNO (another_allocno
)))
4101 if (cost
> best_cost
)
4104 best_slot_num
= slot_num
;
4111 slot_num
= best_slot_num
;
4112 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4113 SET_REGNO_REG_SET (&slot
->spilled_regs
, regno
);
4115 ALLOCNO_HARD_REGNO (allocno
) = -slot_num
- 2;
4120 ira_assert (slot
->width
>= total_size
);
4121 #ifdef ENABLE_IRA_CHECKING
4122 EXECUTE_IF_SET_IN_BITMAP (&slot
->spilled_regs
,
4123 FIRST_PSEUDO_REGISTER
, i
, bi
)
4125 ira_assert (! conflict_by_live_ranges_p (regno
, i
));
4128 SET_REGNO_REG_SET (&slot
->spilled_regs
, regno
);
4129 if (internal_flag_ira_verbose
> 3 && ira_dump_file
)
4131 fprintf (ira_dump_file
, " Assigning %d(freq=%d) slot %d of",
4132 regno
, REG_FREQ (regno
), slot_num
);
4133 EXECUTE_IF_SET_IN_BITMAP (&slot
->spilled_regs
,
4134 FIRST_PSEUDO_REGISTER
, i
, bi
)
4136 if ((unsigned) regno
!= i
)
4137 fprintf (ira_dump_file
, " %d", i
);
4139 fprintf (ira_dump_file
, "\n");
4145 /* This is called by reload every time a new stack slot X with
4146 TOTAL_SIZE was allocated for REGNO. We store this info for
4147 subsequent ira_reuse_stack_slot calls. */
4149 ira_mark_new_stack_slot (rtx x
, int regno
, unsigned int total_size
)
4151 struct ira_spilled_reg_stack_slot
*slot
;
4153 ira_allocno_t allocno
;
4155 ira_assert (PSEUDO_REGNO_BYTES (regno
) <= total_size
);
4156 allocno
= ira_regno_allocno_map
[regno
];
4157 slot_num
= -ALLOCNO_HARD_REGNO (allocno
) - 2;
4160 slot_num
= ira_spilled_reg_stack_slots_num
++;
4161 ALLOCNO_HARD_REGNO (allocno
) = -slot_num
- 2;
4163 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4164 INIT_REG_SET (&slot
->spilled_regs
);
4165 SET_REGNO_REG_SET (&slot
->spilled_regs
, regno
);
4167 slot
->width
= total_size
;
4168 if (internal_flag_ira_verbose
> 3 && ira_dump_file
)
4169 fprintf (ira_dump_file
, " Assigning %d(freq=%d) a new slot %d\n",
4170 regno
, REG_FREQ (regno
), slot_num
);
4174 /* Return spill cost for pseudo-registers whose numbers are in array
4175 REGNOS (with a negative number as an end marker) for reload with
4176 given IN and OUT for INSN. Return also number points (through
4177 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4178 the register pressure is high, number of references of the
4179 pseudo-registers (through NREFS), number of callee-clobbered
4180 hard-registers occupied by the pseudo-registers (through
4181 CALL_USED_COUNT), and the first hard regno occupied by the
4182 pseudo-registers (through FIRST_HARD_REGNO). */
4184 calculate_spill_cost (int *regnos
, rtx in
, rtx out
, rtx insn
,
4185 int *excess_pressure_live_length
,
4186 int *nrefs
, int *call_used_count
, int *first_hard_regno
)
4188 int i
, cost
, regno
, hard_regno
, j
, count
, saved_cost
, nregs
;
4194 for (length
= count
= cost
= i
= 0;; i
++)
4199 *nrefs
+= REG_N_REFS (regno
);
4200 hard_regno
= reg_renumber
[regno
];
4201 ira_assert (hard_regno
>= 0);
4202 a
= ira_regno_allocno_map
[regno
];
4203 length
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
) / ALLOCNO_NUM_OBJECTS (a
);
4204 cost
+= ALLOCNO_MEMORY_COST (a
) - ALLOCNO_CLASS_COST (a
);
4205 nregs
= hard_regno_nregs
[hard_regno
][ALLOCNO_MODE (a
)];
4206 for (j
= 0; j
< nregs
; j
++)
4207 if (! TEST_HARD_REG_BIT (call_used_reg_set
, hard_regno
+ j
))
4211 in_p
= in
&& REG_P (in
) && (int) REGNO (in
) == hard_regno
;
4212 out_p
= out
&& REG_P (out
) && (int) REGNO (out
) == hard_regno
;
4214 && find_regno_note (insn
, REG_DEAD
, hard_regno
) != NULL_RTX
)
4218 saved_cost
+= ira_memory_move_cost
4219 [ALLOCNO_MODE (a
)][ALLOCNO_CLASS (a
)][1];
4222 += ira_memory_move_cost
4223 [ALLOCNO_MODE (a
)][ALLOCNO_CLASS (a
)][0];
4224 cost
-= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn
)) * saved_cost
;
4227 *excess_pressure_live_length
= length
;
4228 *call_used_count
= count
;
4232 hard_regno
= reg_renumber
[regnos
[0]];
4234 *first_hard_regno
= hard_regno
;
4238 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4239 REGNOS is better than spilling pseudo-registers with numbers in
4240 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4241 function used by the reload pass to make better register spilling
4244 ira_better_spill_reload_regno_p (int *regnos
, int *other_regnos
,
4245 rtx in
, rtx out
, rtx insn
)
4247 int cost
, other_cost
;
4248 int length
, other_length
;
4249 int nrefs
, other_nrefs
;
4250 int call_used_count
, other_call_used_count
;
4251 int hard_regno
, other_hard_regno
;
4253 cost
= calculate_spill_cost (regnos
, in
, out
, insn
,
4254 &length
, &nrefs
, &call_used_count
, &hard_regno
);
4255 other_cost
= calculate_spill_cost (other_regnos
, in
, out
, insn
,
4256 &other_length
, &other_nrefs
,
4257 &other_call_used_count
,
4259 if (nrefs
== 0 && other_nrefs
!= 0)
4261 if (nrefs
!= 0 && other_nrefs
== 0)
4263 if (cost
!= other_cost
)
4264 return cost
< other_cost
;
4265 if (length
!= other_length
)
4266 return length
> other_length
;
4267 #ifdef REG_ALLOC_ORDER
4268 if (hard_regno
>= 0 && other_hard_regno
>= 0)
4269 return (inv_reg_alloc_order
[hard_regno
]
4270 < inv_reg_alloc_order
[other_hard_regno
]);
4272 if (call_used_count
!= other_call_used_count
)
4273 return call_used_count
> other_call_used_count
;
4280 /* Allocate and initialize data necessary for assign_hard_reg. */
4282 ira_initiate_assign (void)
4285 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
4286 * ira_allocnos_num
);
4287 consideration_allocno_bitmap
= ira_allocate_bitmap ();
4288 initiate_cost_update ();
4289 allocno_priorities
= (int *) ira_allocate (sizeof (int) * ira_allocnos_num
);
4292 /* Deallocate data used by assign_hard_reg. */
4294 ira_finish_assign (void)
4296 ira_free (sorted_allocnos
);
4297 ira_free_bitmap (consideration_allocno_bitmap
);
4298 finish_cost_update ();
4299 ira_free (allocno_priorities
);
4304 /* Entry function doing color-based register allocation. */
4308 allocno_stack_vec
.create (ira_allocnos_num
);
4309 memset (allocated_hardreg_p
, 0, sizeof (allocated_hardreg_p
));
4310 ira_initiate_assign ();
4312 ira_finish_assign ();
4313 allocno_stack_vec
.release ();
4314 move_spill_restore ();
4319 /* This page contains a simple register allocator without usage of
4320 allocno conflicts. This is used for fast allocation for -O0. */
4322 /* Do register allocation by not using allocno conflicts. It uses
4323 only allocno live ranges. The algorithm is close to Chow's
4324 priority coloring. */
4326 fast_allocation (void)
4328 int i
, j
, k
, num
, class_size
, hard_regno
;
4330 bool no_stack_reg_p
;
4332 enum reg_class aclass
;
4333 enum machine_mode mode
;
4335 ira_allocno_iterator ai
;
4337 HARD_REG_SET conflict_hard_regs
, *used_hard_regs
;
4339 sorted_allocnos
= (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
4340 * ira_allocnos_num
);
4342 FOR_EACH_ALLOCNO (a
, ai
)
4343 sorted_allocnos
[num
++] = a
;
4344 allocno_priorities
= (int *) ira_allocate (sizeof (int) * ira_allocnos_num
);
4345 setup_allocno_priorities (sorted_allocnos
, num
);
4346 used_hard_regs
= (HARD_REG_SET
*) ira_allocate (sizeof (HARD_REG_SET
)
4348 for (i
= 0; i
< ira_max_point
; i
++)
4349 CLEAR_HARD_REG_SET (used_hard_regs
[i
]);
4350 qsort (sorted_allocnos
, num
, sizeof (ira_allocno_t
),
4351 allocno_priority_compare_func
);
4352 for (i
= 0; i
< num
; i
++)
4356 a
= sorted_allocnos
[i
];
4357 nr
= ALLOCNO_NUM_OBJECTS (a
);
4358 CLEAR_HARD_REG_SET (conflict_hard_regs
);
4359 for (l
= 0; l
< nr
; l
++)
4361 ira_object_t obj
= ALLOCNO_OBJECT (a
, l
);
4362 IOR_HARD_REG_SET (conflict_hard_regs
,
4363 OBJECT_CONFLICT_HARD_REGS (obj
));
4364 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
4365 for (j
= r
->start
; j
<= r
->finish
; j
++)
4366 IOR_HARD_REG_SET (conflict_hard_regs
, used_hard_regs
[j
]);
4368 aclass
= ALLOCNO_CLASS (a
);
4369 ALLOCNO_ASSIGNED_P (a
) = true;
4370 ALLOCNO_HARD_REGNO (a
) = -1;
4371 if (hard_reg_set_subset_p (reg_class_contents
[aclass
],
4372 conflict_hard_regs
))
4374 mode
= ALLOCNO_MODE (a
);
4376 no_stack_reg_p
= ALLOCNO_NO_STACK_REG_P (a
);
4378 class_size
= ira_class_hard_regs_num
[aclass
];
4379 for (j
= 0; j
< class_size
; j
++)
4381 hard_regno
= ira_class_hard_regs
[aclass
][j
];
4383 if (no_stack_reg_p
&& FIRST_STACK_REG
<= hard_regno
4384 && hard_regno
<= LAST_STACK_REG
)
4387 if (ira_hard_reg_set_intersection_p (hard_regno
, mode
, conflict_hard_regs
)
4388 || (TEST_HARD_REG_BIT
4389 (ira_prohibited_class_mode_regs
[aclass
][mode
], hard_regno
)))
4391 ALLOCNO_HARD_REGNO (a
) = hard_regno
;
4392 for (l
= 0; l
< nr
; l
++)
4394 ira_object_t obj
= ALLOCNO_OBJECT (a
, l
);
4395 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
4396 for (k
= r
->start
; k
<= r
->finish
; k
++)
4397 IOR_HARD_REG_SET (used_hard_regs
[k
],
4398 ira_reg_mode_hard_regset
[hard_regno
][mode
]);
4403 ira_free (sorted_allocnos
);
4404 ira_free (used_hard_regs
);
4405 ira_free (allocno_priorities
);
4406 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
4407 ira_print_disposition (ira_dump_file
);
4412 /* Entry function doing coloring. */
4417 ira_allocno_iterator ai
;
4419 /* Setup updated costs. */
4420 FOR_EACH_ALLOCNO (a
, ai
)
4422 ALLOCNO_UPDATED_MEMORY_COST (a
) = ALLOCNO_MEMORY_COST (a
);
4423 ALLOCNO_UPDATED_CLASS_COST (a
) = ALLOCNO_CLASS_COST (a
);
4425 if (ira_conflicts_p
)