1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
32 #include "hash-table.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
36 #include "diagnostic-core.h"
42 typedef struct allocno_hard_regs
*allocno_hard_regs_t
;
44 /* The structure contains information about hard registers can be
45 assigned to allocnos. Usually it is allocno profitable hard
46 registers but in some cases this set can be a bit different. Major
47 reason of the difference is a requirement to use hard register sets
48 that form a tree or a forest (set of trees), i.e. hard register set
49 of a node should contain hard register sets of its subnodes. */
50 struct allocno_hard_regs
52 /* Hard registers can be assigned to an allocno. */
54 /* Overall (spilling) cost of all allocnos with given register
59 typedef struct allocno_hard_regs_node
*allocno_hard_regs_node_t
;
61 /* A node representing allocno hard registers. Such nodes form a
62 forest (set of trees). Each subnode of given node in the forest
63 refers for hard register set (usually allocno profitable hard
64 register set) which is a subset of one referred from given
66 struct allocno_hard_regs_node
68 /* Set up number of the node in preorder traversing of the forest. */
70 /* Used for different calculation like finding conflict size of an
73 /* Used for calculation of conflict size of an allocno. The
74 conflict size of the allocno is maximal number of given allocno
75 hard registers needed for allocation of the conflicting allocnos.
76 Given allocno is trivially colored if this number plus the number
77 of hard registers needed for given allocno is not greater than
78 the number of given allocno hard register set. */
80 /* The number of hard registers given by member hard_regs. */
82 /* The following member is used to form the final forest. */
84 /* Pointer to the corresponding profitable hard registers. */
85 allocno_hard_regs_t hard_regs
;
86 /* Parent, first subnode, previous and next node with the same
87 parent in the forest. */
88 allocno_hard_regs_node_t parent
, first
, prev
, next
;
/* Info about changing hard reg costs of an allocno.  */
struct update_cost_record
{
  /* Hard regno for which we changed the cost.  */
  int hard_regno;
  /* Divisor used when we changed the cost of HARD_REGNO.  */
  int divisor;
  /* Next record for given allocno.  */
  struct update_cost_record *next;
};
102 /* To decrease footprint of ira_allocno structure we store all data
103 needed only for coloring in the following structure. */
104 struct allocno_color_data
106 /* TRUE value means that the allocno was not removed yet from the
107 conflicting graph during colouring. */
108 unsigned int in_graph_p
: 1;
109 /* TRUE if it is put on the stack to make other allocnos
111 unsigned int may_be_spilled_p
: 1;
112 /* TRUE if the allocno is trivially colorable. */
113 unsigned int colorable_p
: 1;
114 /* Number of hard registers of the allocno class really
115 available for the allocno allocation. It is number of the
116 profitable hard regs. */
117 int available_regs_num
;
118 /* Allocnos in a bucket (used in coloring) chained by the following
120 ira_allocno_t next_bucket_allocno
;
121 ira_allocno_t prev_bucket_allocno
;
122 /* Used for temporary purposes. */
124 /* Used to exclude repeated processing. */
126 /* Profitable hard regs available for this pseudo allocation. It
127 means that the set excludes unavailable hard regs and hard regs
128 conflicting with given pseudo. They should be of the allocno
130 HARD_REG_SET profitable_hard_regs
;
131 /* The allocno hard registers node. */
132 allocno_hard_regs_node_t hard_regs_node
;
133 /* Array of structures allocno_hard_regs_subnode representing
134 given allocno hard registers node (the 1st element in the array)
135 and all its subnodes in the tree (forest) of allocno hard
136 register nodes (see comments above). */
137 int hard_regs_subnodes_start
;
138 /* The length of the previous array. */
139 int hard_regs_subnodes_num
;
140 /* Records about updating allocno hard reg costs from copies. If
141 the allocno did not get expected hard register, these records are
142 used to restore original hard reg costs of allocnos connected to
143 this allocno by copies. */
144 struct update_cost_record
*update_cost_records
;
145 /* Threads. We collect allocnos connected by copies into threads
146 and try to assign hard regs to allocnos by threads. */
147 /* Allocno representing all thread. */
148 ira_allocno_t first_thread_allocno
;
149 /* Allocnos in thread forms a cycle list through the following
151 ira_allocno_t next_thread_allocno
;
152 /* All thread frequency. Defined only for first thread allocno. */
157 typedef struct allocno_color_data
*allocno_color_data_t
;
159 /* Container for storing allocno data concerning coloring. */
160 static allocno_color_data_t allocno_color_data
;
162 /* Macro to access the data concerning coloring. */
163 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
165 /* Used for finding allocno colorability to exclude repeated allocno
166 processing and for updating preferencing to exclude repeated
167 allocno processing during assignment. */
168 static int curr_allocno_process
;
170 /* This file contains code for regional graph coloring, spill/restore
171 code placement optimization, and code helping the reload pass to do
174 /* Bitmap of allocnos which should be colored. */
175 static bitmap coloring_allocno_bitmap
;
177 /* Bitmap of allocnos which should be taken into account during
178 coloring. In general case it contains allocnos from
179 coloring_allocno_bitmap plus other already colored conflicting
181 static bitmap consideration_allocno_bitmap
;
183 /* All allocnos sorted according their priorities. */
184 static ira_allocno_t
*sorted_allocnos
;
186 /* Vec representing the stack of allocnos used during coloring. */
187 static vec
<ira_allocno_t
> allocno_stack_vec
;
189 /* Helper for qsort comparison callbacks - return a positive integer if
190 X > Y, or a negative value otherwise. Use a conditional expression
191 instead of a difference computation to insulate from possible overflow
192 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
193 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
197 /* Definition of vector of allocno hard registers. */
199 /* Vector of unique allocno hard registers. */
200 static vec
<allocno_hard_regs_t
> allocno_hard_regs_vec
;
202 struct allocno_hard_regs_hasher
: typed_noop_remove
<allocno_hard_regs
>
204 typedef allocno_hard_regs value_type
;
205 typedef allocno_hard_regs compare_type
;
206 static inline hashval_t
hash (const value_type
*);
207 static inline bool equal (const value_type
*, const compare_type
*);
210 /* Returns hash value for allocno hard registers V. */
212 allocno_hard_regs_hasher::hash (const value_type
*hv
)
214 return iterative_hash (&hv
->set
, sizeof (HARD_REG_SET
), 0);
217 /* Compares allocno hard registers V1 and V2. */
219 allocno_hard_regs_hasher::equal (const value_type
*hv1
, const compare_type
*hv2
)
221 return hard_reg_set_equal_p (hv1
->set
, hv2
->set
);
224 /* Hash table of unique allocno hard registers. */
225 static hash_table
<allocno_hard_regs_hasher
> allocno_hard_regs_htab
;
227 /* Return allocno hard registers in the hash table equal to HV. */
228 static allocno_hard_regs_t
229 find_hard_regs (allocno_hard_regs_t hv
)
231 return allocno_hard_regs_htab
.find (hv
);
234 /* Insert allocno hard registers HV in the hash table (if it is not
235 there yet) and return the value which in the table. */
236 static allocno_hard_regs_t
237 insert_hard_regs (allocno_hard_regs_t hv
)
239 allocno_hard_regs
**slot
= allocno_hard_regs_htab
.find_slot (hv
, INSERT
);
246 /* Initialize data concerning allocno hard registers. */
248 init_allocno_hard_regs (void)
250 allocno_hard_regs_vec
.create (200);
251 allocno_hard_regs_htab
.create (200);
254 /* Add (or update info about) allocno hard registers with SET and
256 static allocno_hard_regs_t
257 add_allocno_hard_regs (HARD_REG_SET set
, int64_t cost
)
259 struct allocno_hard_regs temp
;
260 allocno_hard_regs_t hv
;
262 gcc_assert (! hard_reg_set_empty_p (set
));
263 COPY_HARD_REG_SET (temp
.set
, set
);
264 if ((hv
= find_hard_regs (&temp
)) != NULL
)
268 hv
= ((struct allocno_hard_regs
*)
269 ira_allocate (sizeof (struct allocno_hard_regs
)));
270 COPY_HARD_REG_SET (hv
->set
, set
);
272 allocno_hard_regs_vec
.safe_push (hv
);
273 insert_hard_regs (hv
);
278 /* Finalize data concerning allocno hard registers. */
280 finish_allocno_hard_regs (void)
283 allocno_hard_regs_t hv
;
286 allocno_hard_regs_vec
.iterate (i
, &hv
);
289 allocno_hard_regs_htab
.dispose ();
290 allocno_hard_regs_vec
.release ();
293 /* Sort hard regs according to their frequency of usage. */
295 allocno_hard_regs_compare (const void *v1p
, const void *v2p
)
297 allocno_hard_regs_t hv1
= *(const allocno_hard_regs_t
*) v1p
;
298 allocno_hard_regs_t hv2
= *(const allocno_hard_regs_t
*) v2p
;
300 if (hv2
->cost
> hv1
->cost
)
302 else if (hv2
->cost
< hv1
->cost
)
310 /* Used for finding a common ancestor of two allocno hard registers
311 nodes in the forest. We use the current value of
312 'node_check_tick' to mark all nodes from one node to the top and
313 then walking up from another node until we find a marked node.
315 It is also used to figure out allocno colorability as a mark that
316 we already reset value of member 'conflict_size' for the forest
317 node corresponding to the processed allocno. */
318 static int node_check_tick
;
320 /* Roots of the forest containing hard register sets can be assigned
322 static allocno_hard_regs_node_t hard_regs_roots
;
324 /* Definition of vector of allocno hard register nodes. */
326 /* Vector used to create the forest. */
327 static vec
<allocno_hard_regs_node_t
> hard_regs_node_vec
;
329 /* Create and return allocno hard registers node containing allocno
330 hard registers HV. */
331 static allocno_hard_regs_node_t
332 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv
)
334 allocno_hard_regs_node_t new_node
;
336 new_node
= ((struct allocno_hard_regs_node
*)
337 ira_allocate (sizeof (struct allocno_hard_regs_node
)));
339 new_node
->hard_regs
= hv
;
340 new_node
->hard_regs_num
= hard_reg_set_size (hv
->set
);
341 new_node
->first
= NULL
;
342 new_node
->used_p
= false;
346 /* Add allocno hard registers node NEW_NODE to the forest on its level
349 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t
*roots
,
350 allocno_hard_regs_node_t new_node
)
352 new_node
->next
= *roots
;
353 if (new_node
->next
!= NULL
)
354 new_node
->next
->prev
= new_node
;
355 new_node
->prev
= NULL
;
359 /* Add allocno hard registers HV (or its best approximation if it is
360 not possible) to the forest on its level given by ROOTS. */
362 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t
*roots
,
363 allocno_hard_regs_t hv
)
365 unsigned int i
, start
;
366 allocno_hard_regs_node_t node
, prev
, new_node
;
367 HARD_REG_SET temp_set
;
368 allocno_hard_regs_t hv2
;
370 start
= hard_regs_node_vec
.length ();
371 for (node
= *roots
; node
!= NULL
; node
= node
->next
)
373 if (hard_reg_set_equal_p (hv
->set
, node
->hard_regs
->set
))
375 if (hard_reg_set_subset_p (hv
->set
, node
->hard_regs
->set
))
377 add_allocno_hard_regs_to_forest (&node
->first
, hv
);
380 if (hard_reg_set_subset_p (node
->hard_regs
->set
, hv
->set
))
381 hard_regs_node_vec
.safe_push (node
);
382 else if (hard_reg_set_intersect_p (hv
->set
, node
->hard_regs
->set
))
384 COPY_HARD_REG_SET (temp_set
, hv
->set
);
385 AND_HARD_REG_SET (temp_set
, node
->hard_regs
->set
);
386 hv2
= add_allocno_hard_regs (temp_set
, hv
->cost
);
387 add_allocno_hard_regs_to_forest (&node
->first
, hv2
);
390 if (hard_regs_node_vec
.length ()
393 /* Create a new node which contains nodes in hard_regs_node_vec. */
394 CLEAR_HARD_REG_SET (temp_set
);
396 i
< hard_regs_node_vec
.length ();
399 node
= hard_regs_node_vec
[i
];
400 IOR_HARD_REG_SET (temp_set
, node
->hard_regs
->set
);
402 hv
= add_allocno_hard_regs (temp_set
, hv
->cost
);
403 new_node
= create_new_allocno_hard_regs_node (hv
);
406 i
< hard_regs_node_vec
.length ();
409 node
= hard_regs_node_vec
[i
];
410 if (node
->prev
== NULL
)
413 node
->prev
->next
= node
->next
;
414 if (node
->next
!= NULL
)
415 node
->next
->prev
= node
->prev
;
417 new_node
->first
= node
;
424 add_new_allocno_hard_regs_node_to_forest (roots
, new_node
);
426 hard_regs_node_vec
.truncate (start
);
429 /* Add allocno hard registers nodes starting with the forest level
430 given by FIRST which contains biggest set inside SET. */
432 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first
,
435 allocno_hard_regs_node_t node
;
437 ira_assert (first
!= NULL
);
438 for (node
= first
; node
!= NULL
; node
= node
->next
)
439 if (hard_reg_set_subset_p (node
->hard_regs
->set
, set
))
440 hard_regs_node_vec
.safe_push (node
);
441 else if (hard_reg_set_intersect_p (set
, node
->hard_regs
->set
))
442 collect_allocno_hard_regs_cover (node
->first
, set
);
445 /* Set up field parent as PARENT in all allocno hard registers nodes
446 in forest given by FIRST. */
448 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first
,
449 allocno_hard_regs_node_t parent
)
451 allocno_hard_regs_node_t node
;
453 for (node
= first
; node
!= NULL
; node
= node
->next
)
455 node
->parent
= parent
;
456 setup_allocno_hard_regs_nodes_parent (node
->first
, node
);
460 /* Return allocno hard registers node which is a first common ancestor
461 node of FIRST and SECOND in the forest. */
462 static allocno_hard_regs_node_t
463 first_common_ancestor_node (allocno_hard_regs_node_t first
,
464 allocno_hard_regs_node_t second
)
466 allocno_hard_regs_node_t node
;
469 for (node
= first
; node
!= NULL
; node
= node
->parent
)
470 node
->check
= node_check_tick
;
471 for (node
= second
; node
!= NULL
; node
= node
->parent
)
472 if (node
->check
== node_check_tick
)
474 return first_common_ancestor_node (second
, first
);
477 /* Print hard reg set SET to F. */
479 print_hard_reg_set (FILE *f
, HARD_REG_SET set
, bool new_line_p
)
483 for (start
= -1, i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
485 if (TEST_HARD_REG_BIT (set
, i
))
487 if (i
== 0 || ! TEST_HARD_REG_BIT (set
, i
- 1))
491 && (i
== FIRST_PSEUDO_REGISTER
- 1 || ! TEST_HARD_REG_BIT (set
, i
)))
494 fprintf (f
, " %d", start
);
495 else if (start
== i
- 2)
496 fprintf (f
, " %d %d", start
, start
+ 1);
498 fprintf (f
, " %d-%d", start
, i
- 1);
506 /* Print allocno hard register subforest given by ROOTS and its LEVEL
509 print_hard_regs_subforest (FILE *f
, allocno_hard_regs_node_t roots
,
513 allocno_hard_regs_node_t node
;
515 for (node
= roots
; node
!= NULL
; node
= node
->next
)
518 for (i
= 0; i
< level
* 2; i
++)
520 fprintf (f
, "%d:(", node
->preorder_num
);
521 print_hard_reg_set (f
, node
->hard_regs
->set
, false);
522 fprintf (f
, ")@%"PRId64
"\n", node
->hard_regs
->cost
);
523 print_hard_regs_subforest (f
, node
->first
, level
+ 1);
527 /* Print the allocno hard register forest to F. */
529 print_hard_regs_forest (FILE *f
)
531 fprintf (f
, " Hard reg set forest:\n");
532 print_hard_regs_subforest (f
, hard_regs_roots
, 1);
535 /* Print the allocno hard register forest to stderr. */
537 ira_debug_hard_regs_forest (void)
539 print_hard_regs_forest (stderr
);
542 /* Remove unused allocno hard registers nodes from forest given by its
545 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t
*roots
)
547 allocno_hard_regs_node_t node
, prev
, next
, last
;
549 for (prev
= NULL
, node
= *roots
; node
!= NULL
; node
= next
)
554 remove_unused_allocno_hard_regs_nodes (&node
->first
);
559 for (last
= node
->first
;
560 last
!= NULL
&& last
->next
!= NULL
;
566 *roots
= node
->first
;
568 prev
->next
= node
->first
;
588 /* Set up fields preorder_num starting with START_NUM in all allocno
589 hard registers nodes in forest given by FIRST. Return biggest set
590 PREORDER_NUM increased by 1. */
592 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first
,
593 allocno_hard_regs_node_t parent
,
596 allocno_hard_regs_node_t node
;
598 for (node
= first
; node
!= NULL
; node
= node
->next
)
600 node
->preorder_num
= start_num
++;
601 node
->parent
= parent
;
602 start_num
= enumerate_allocno_hard_regs_nodes (node
->first
, node
,
608 /* Number of allocno hard registers nodes in the forest. */
609 static int allocno_hard_regs_nodes_num
;
611 /* Table preorder number of allocno hard registers node in the forest
612 -> the allocno hard registers node. */
613 static allocno_hard_regs_node_t
*allocno_hard_regs_nodes
;
616 typedef struct allocno_hard_regs_subnode
*allocno_hard_regs_subnode_t
;
618 /* The structure is used to describes all subnodes (not only immediate
619 ones) in the mentioned above tree for given allocno hard register
620 node. The usage of such data accelerates calculation of
621 colorability of given allocno. */
622 struct allocno_hard_regs_subnode
624 /* The conflict size of conflicting allocnos whose hard register
625 sets are equal sets (plus supersets if given node is given
626 allocno hard registers node) of one in the given node. */
627 int left_conflict_size
;
628 /* The summary conflict size of conflicting allocnos whose hard
629 register sets are strict subsets of one in the given node.
630 Overall conflict size is
631 left_conflict_subnodes_size
632 + MIN (max_node_impact - left_conflict_subnodes_size,
635 short left_conflict_subnodes_size
;
636 short max_node_impact
;
639 /* Container for hard regs subnodes of all allocnos. */
640 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes
;
642 /* Table (preorder number of allocno hard registers node in the
643 forest, preorder number of allocno hard registers subnode) -> index
644 of the subnode relative to the node. -1 if it is not a
646 static int *allocno_hard_regs_subnode_index
;
648 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
649 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
651 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first
)
653 allocno_hard_regs_node_t node
, parent
;
656 for (node
= first
; node
!= NULL
; node
= node
->next
)
658 allocno_hard_regs_nodes
[node
->preorder_num
] = node
;
659 for (parent
= node
; parent
!= NULL
; parent
= parent
->parent
)
661 index
= parent
->preorder_num
* allocno_hard_regs_nodes_num
;
662 allocno_hard_regs_subnode_index
[index
+ node
->preorder_num
]
663 = node
->preorder_num
- parent
->preorder_num
;
665 setup_allocno_hard_regs_subnode_index (node
->first
);
669 /* Count all allocno hard registers nodes in tree ROOT. */
671 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root
)
675 for (root
= root
->first
; root
!= NULL
; root
= root
->next
)
676 len
+= get_allocno_hard_regs_subnodes_num (root
);
680 /* Build the forest of allocno hard registers nodes and assign each
681 allocno a node from the forest. */
683 form_allocno_hard_regs_nodes_forest (void)
685 unsigned int i
, j
, size
, len
;
688 allocno_hard_regs_t hv
;
691 allocno_hard_regs_node_t node
, allocno_hard_regs_node
;
692 allocno_color_data_t allocno_data
;
695 init_allocno_hard_regs ();
696 hard_regs_roots
= NULL
;
697 hard_regs_node_vec
.create (100);
698 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
699 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs
, i
))
701 CLEAR_HARD_REG_SET (temp
);
702 SET_HARD_REG_BIT (temp
, i
);
703 hv
= add_allocno_hard_regs (temp
, 0);
704 node
= create_new_allocno_hard_regs_node (hv
);
705 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots
, node
);
707 start
= allocno_hard_regs_vec
.length ();
708 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
711 allocno_data
= ALLOCNO_COLOR_DATA (a
);
713 if (hard_reg_set_empty_p (allocno_data
->profitable_hard_regs
))
715 hv
= (add_allocno_hard_regs
716 (allocno_data
->profitable_hard_regs
,
717 ALLOCNO_MEMORY_COST (a
) - ALLOCNO_CLASS_COST (a
)));
719 SET_HARD_REG_SET (temp
);
720 AND_COMPL_HARD_REG_SET (temp
, ira_no_alloc_regs
);
721 add_allocno_hard_regs (temp
, 0);
722 qsort (allocno_hard_regs_vec
.address () + start
,
723 allocno_hard_regs_vec
.length () - start
,
724 sizeof (allocno_hard_regs_t
), allocno_hard_regs_compare
);
726 allocno_hard_regs_vec
.iterate (i
, &hv
);
729 add_allocno_hard_regs_to_forest (&hard_regs_roots
, hv
);
730 ira_assert (hard_regs_node_vec
.length () == 0);
732 /* We need to set up parent fields for right work of
733 first_common_ancestor_node. */
734 setup_allocno_hard_regs_nodes_parent (hard_regs_roots
, NULL
);
735 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
738 allocno_data
= ALLOCNO_COLOR_DATA (a
);
739 if (hard_reg_set_empty_p (allocno_data
->profitable_hard_regs
))
741 hard_regs_node_vec
.truncate (0);
742 collect_allocno_hard_regs_cover (hard_regs_roots
,
743 allocno_data
->profitable_hard_regs
);
744 allocno_hard_regs_node
= NULL
;
745 for (j
= 0; hard_regs_node_vec
.iterate (j
, &node
); j
++)
746 allocno_hard_regs_node
749 : first_common_ancestor_node (node
, allocno_hard_regs_node
));
750 /* That is a temporary storage. */
751 allocno_hard_regs_node
->used_p
= true;
752 allocno_data
->hard_regs_node
= allocno_hard_regs_node
;
754 ira_assert (hard_regs_roots
->next
== NULL
);
755 hard_regs_roots
->used_p
= true;
756 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots
);
757 allocno_hard_regs_nodes_num
758 = enumerate_allocno_hard_regs_nodes (hard_regs_roots
, NULL
, 0);
759 allocno_hard_regs_nodes
760 = ((allocno_hard_regs_node_t
*)
761 ira_allocate (allocno_hard_regs_nodes_num
762 * sizeof (allocno_hard_regs_node_t
)));
763 size
= allocno_hard_regs_nodes_num
* allocno_hard_regs_nodes_num
;
764 allocno_hard_regs_subnode_index
765 = (int *) ira_allocate (size
* sizeof (int));
766 for (i
= 0; i
< size
; i
++)
767 allocno_hard_regs_subnode_index
[i
] = -1;
768 setup_allocno_hard_regs_subnode_index (hard_regs_roots
);
770 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
773 allocno_data
= ALLOCNO_COLOR_DATA (a
);
774 if (hard_reg_set_empty_p (allocno_data
->profitable_hard_regs
))
776 len
= get_allocno_hard_regs_subnodes_num (allocno_data
->hard_regs_node
);
777 allocno_data
->hard_regs_subnodes_start
= start
;
778 allocno_data
->hard_regs_subnodes_num
= len
;
781 allocno_hard_regs_subnodes
782 = ((allocno_hard_regs_subnode_t
)
783 ira_allocate (sizeof (struct allocno_hard_regs_subnode
) * start
));
784 hard_regs_node_vec
.release ();
787 /* Free tree of allocno hard registers nodes given by its ROOT. */
789 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root
)
791 allocno_hard_regs_node_t child
, next
;
793 for (child
= root
->first
; child
!= NULL
; child
= next
)
796 finish_allocno_hard_regs_nodes_tree (child
);
801 /* Finish work with the forest of allocno hard registers nodes. */
803 finish_allocno_hard_regs_nodes_forest (void)
805 allocno_hard_regs_node_t node
, next
;
807 ira_free (allocno_hard_regs_subnodes
);
808 for (node
= hard_regs_roots
; node
!= NULL
; node
= next
)
811 finish_allocno_hard_regs_nodes_tree (node
);
813 ira_free (allocno_hard_regs_nodes
);
814 ira_free (allocno_hard_regs_subnode_index
);
815 finish_allocno_hard_regs ();
818 /* Set up left conflict sizes and left conflict subnodes sizes of hard
819 registers subnodes of allocno A. Return TRUE if allocno A is
820 trivially colorable. */
822 setup_left_conflict_sizes_p (ira_allocno_t a
)
824 int i
, k
, nobj
, start
;
825 int conflict_size
, left_conflict_subnodes_size
, node_preorder_num
;
826 allocno_color_data_t data
;
827 HARD_REG_SET profitable_hard_regs
;
828 allocno_hard_regs_subnode_t subnodes
;
829 allocno_hard_regs_node_t node
;
830 HARD_REG_SET node_set
;
832 nobj
= ALLOCNO_NUM_OBJECTS (a
);
834 data
= ALLOCNO_COLOR_DATA (a
);
835 subnodes
= allocno_hard_regs_subnodes
+ data
->hard_regs_subnodes_start
;
836 COPY_HARD_REG_SET (profitable_hard_regs
, data
->profitable_hard_regs
);
837 node
= data
->hard_regs_node
;
838 node_preorder_num
= node
->preorder_num
;
839 COPY_HARD_REG_SET (node_set
, node
->hard_regs
->set
);
841 for (k
= 0; k
< nobj
; k
++)
843 ira_object_t obj
= ALLOCNO_OBJECT (a
, k
);
844 ira_object_t conflict_obj
;
845 ira_object_conflict_iterator oci
;
847 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
850 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
851 allocno_hard_regs_node_t conflict_node
, temp_node
;
852 HARD_REG_SET conflict_node_set
;
853 allocno_color_data_t conflict_data
;
855 conflict_data
= ALLOCNO_COLOR_DATA (conflict_a
);
856 if (! ALLOCNO_COLOR_DATA (conflict_a
)->in_graph_p
857 || ! hard_reg_set_intersect_p (profitable_hard_regs
,
859 ->profitable_hard_regs
))
861 conflict_node
= conflict_data
->hard_regs_node
;
862 COPY_HARD_REG_SET (conflict_node_set
, conflict_node
->hard_regs
->set
);
863 if (hard_reg_set_subset_p (node_set
, conflict_node_set
))
867 ira_assert (hard_reg_set_subset_p (conflict_node_set
, node_set
));
868 temp_node
= conflict_node
;
870 if (temp_node
->check
!= node_check_tick
)
872 temp_node
->check
= node_check_tick
;
873 temp_node
->conflict_size
= 0;
875 size
= (ira_reg_class_max_nregs
876 [ALLOCNO_CLASS (conflict_a
)][ALLOCNO_MODE (conflict_a
)]);
877 if (ALLOCNO_NUM_OBJECTS (conflict_a
) > 1)
878 /* We will deal with the subwords individually. */
880 temp_node
->conflict_size
+= size
;
883 for (i
= 0; i
< data
->hard_regs_subnodes_num
; i
++)
885 allocno_hard_regs_node_t temp_node
;
887 temp_node
= allocno_hard_regs_nodes
[i
+ node_preorder_num
];
888 ira_assert (temp_node
->preorder_num
== i
+ node_preorder_num
);
889 subnodes
[i
].left_conflict_size
= (temp_node
->check
!= node_check_tick
890 ? 0 : temp_node
->conflict_size
);
891 if (hard_reg_set_subset_p (temp_node
->hard_regs
->set
,
892 profitable_hard_regs
))
893 subnodes
[i
].max_node_impact
= temp_node
->hard_regs_num
;
896 HARD_REG_SET temp_set
;
897 int j
, n
, hard_regno
;
898 enum reg_class aclass
;
900 COPY_HARD_REG_SET (temp_set
, temp_node
->hard_regs
->set
);
901 AND_HARD_REG_SET (temp_set
, profitable_hard_regs
);
902 aclass
= ALLOCNO_CLASS (a
);
903 for (n
= 0, j
= ira_class_hard_regs_num
[aclass
] - 1; j
>= 0; j
--)
905 hard_regno
= ira_class_hard_regs
[aclass
][j
];
906 if (TEST_HARD_REG_BIT (temp_set
, hard_regno
))
909 subnodes
[i
].max_node_impact
= n
;
911 subnodes
[i
].left_conflict_subnodes_size
= 0;
913 start
= node_preorder_num
* allocno_hard_regs_nodes_num
;
914 for (i
= data
->hard_regs_subnodes_num
- 1; i
>= 0; i
--)
917 allocno_hard_regs_node_t parent
;
919 size
= (subnodes
[i
].left_conflict_subnodes_size
920 + MIN (subnodes
[i
].max_node_impact
921 - subnodes
[i
].left_conflict_subnodes_size
,
922 subnodes
[i
].left_conflict_size
));
923 parent
= allocno_hard_regs_nodes
[i
+ node_preorder_num
]->parent
;
927 = allocno_hard_regs_subnode_index
[start
+ parent
->preorder_num
];
930 subnodes
[parent_i
].left_conflict_subnodes_size
+= size
;
932 left_conflict_subnodes_size
= subnodes
[0].left_conflict_subnodes_size
;
934 += (left_conflict_subnodes_size
935 + MIN (subnodes
[0].max_node_impact
- left_conflict_subnodes_size
,
936 subnodes
[0].left_conflict_size
));
937 conflict_size
+= ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)];
938 data
->colorable_p
= conflict_size
<= data
->available_regs_num
;
939 return data
->colorable_p
;
942 /* Update left conflict sizes of hard registers subnodes of allocno A
943 after removing allocno REMOVED_A with SIZE from the conflict graph.
944 Return TRUE if A is trivially colorable. */
946 update_left_conflict_sizes_p (ira_allocno_t a
,
947 ira_allocno_t removed_a
, int size
)
949 int i
, conflict_size
, before_conflict_size
, diff
, start
;
950 int node_preorder_num
, parent_i
;
951 allocno_hard_regs_node_t node
, removed_node
, parent
;
952 allocno_hard_regs_subnode_t subnodes
;
953 allocno_color_data_t data
= ALLOCNO_COLOR_DATA (a
);
955 ira_assert (! data
->colorable_p
);
956 node
= data
->hard_regs_node
;
957 node_preorder_num
= node
->preorder_num
;
958 removed_node
= ALLOCNO_COLOR_DATA (removed_a
)->hard_regs_node
;
959 ira_assert (hard_reg_set_subset_p (removed_node
->hard_regs
->set
,
960 node
->hard_regs
->set
)
961 || hard_reg_set_subset_p (node
->hard_regs
->set
,
962 removed_node
->hard_regs
->set
));
963 start
= node_preorder_num
* allocno_hard_regs_nodes_num
;
964 i
= allocno_hard_regs_subnode_index
[start
+ removed_node
->preorder_num
];
967 subnodes
= allocno_hard_regs_subnodes
+ data
->hard_regs_subnodes_start
;
969 = (subnodes
[i
].left_conflict_subnodes_size
970 + MIN (subnodes
[i
].max_node_impact
971 - subnodes
[i
].left_conflict_subnodes_size
,
972 subnodes
[i
].left_conflict_size
));
973 subnodes
[i
].left_conflict_size
-= size
;
977 = (subnodes
[i
].left_conflict_subnodes_size
978 + MIN (subnodes
[i
].max_node_impact
979 - subnodes
[i
].left_conflict_subnodes_size
,
980 subnodes
[i
].left_conflict_size
));
981 if ((diff
= before_conflict_size
- conflict_size
) == 0)
983 ira_assert (conflict_size
< before_conflict_size
);
984 parent
= allocno_hard_regs_nodes
[i
+ node_preorder_num
]->parent
;
988 = allocno_hard_regs_subnode_index
[start
+ parent
->preorder_num
];
993 = (subnodes
[i
].left_conflict_subnodes_size
994 + MIN (subnodes
[i
].max_node_impact
995 - subnodes
[i
].left_conflict_subnodes_size
,
996 subnodes
[i
].left_conflict_size
));
997 subnodes
[i
].left_conflict_subnodes_size
-= diff
;
1001 + ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]
1002 > data
->available_regs_num
))
1004 data
->colorable_p
= true;
1008 /* Return true if allocno A has empty profitable hard regs. */
1010 empty_profitable_hard_regs (ira_allocno_t a
)
1012 allocno_color_data_t data
= ALLOCNO_COLOR_DATA (a
);
1014 return hard_reg_set_empty_p (data
->profitable_hard_regs
);
1017 /* Set up profitable hard registers for each allocno being
1020 setup_profitable_hard_regs (void)
1023 int j
, k
, nobj
, hard_regno
, nregs
, class_size
;
1026 enum reg_class aclass
;
1027 enum machine_mode mode
;
1028 allocno_color_data_t data
;
1030 /* Initial set up from allocno classes and explicitly conflicting
1032 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
1034 a
= ira_allocnos
[i
];
1035 if ((aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
)
1037 data
= ALLOCNO_COLOR_DATA (a
);
1038 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
1039 && ALLOCNO_CLASS_COST (a
) > ALLOCNO_MEMORY_COST (a
))
1040 CLEAR_HARD_REG_SET (data
->profitable_hard_regs
);
1043 mode
= ALLOCNO_MODE (a
);
1044 COPY_HARD_REG_SET (data
->profitable_hard_regs
,
1045 ira_useful_class_mode_regs
[aclass
][mode
]);
1046 nobj
= ALLOCNO_NUM_OBJECTS (a
);
1047 for (k
= 0; k
< nobj
; k
++)
1049 ira_object_t obj
= ALLOCNO_OBJECT (a
, k
);
1051 AND_COMPL_HARD_REG_SET (data
->profitable_hard_regs
,
1052 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
1056 /* Exclude hard regs already assigned for conflicting objects. */
1057 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, i
, bi
)
1059 a
= ira_allocnos
[i
];
1060 if ((aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
1061 || ! ALLOCNO_ASSIGNED_P (a
)
1062 || (hard_regno
= ALLOCNO_HARD_REGNO (a
)) < 0)
1064 mode
= ALLOCNO_MODE (a
);
1065 nregs
= hard_regno_nregs
[hard_regno
][mode
];
1066 nobj
= ALLOCNO_NUM_OBJECTS (a
);
1067 for (k
= 0; k
< nobj
; k
++)
1069 ira_object_t obj
= ALLOCNO_OBJECT (a
, k
);
1070 ira_object_t conflict_obj
;
1071 ira_object_conflict_iterator oci
;
1073 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
1075 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
1077 /* We can process the conflict allocno repeatedly with
1079 if (nregs
== nobj
&& nregs
> 1)
1081 int num
= OBJECT_SUBWORD (conflict_obj
);
1083 if (REG_WORDS_BIG_ENDIAN
)
1085 (ALLOCNO_COLOR_DATA (conflict_a
)->profitable_hard_regs
,
1086 hard_regno
+ nobj
- num
- 1);
1089 (ALLOCNO_COLOR_DATA (conflict_a
)->profitable_hard_regs
,
1093 AND_COMPL_HARD_REG_SET
1094 (ALLOCNO_COLOR_DATA (conflict_a
)->profitable_hard_regs
,
1095 ira_reg_mode_hard_regset
[hard_regno
][mode
]);
1099 /* Exclude too costly hard regs. */
1100 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
1102 int min_cost
= INT_MAX
;
1105 a
= ira_allocnos
[i
];
1106 if ((aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
1107 || empty_profitable_hard_regs (a
))
1109 data
= ALLOCNO_COLOR_DATA (a
);
1110 mode
= ALLOCNO_MODE (a
);
1111 if ((costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (a
)) != NULL
1112 || (costs
= ALLOCNO_HARD_REG_COSTS (a
)) != NULL
)
1114 class_size
= ira_class_hard_regs_num
[aclass
];
1115 for (j
= 0; j
< class_size
; j
++)
1117 hard_regno
= ira_class_hard_regs
[aclass
][j
];
1118 if (! TEST_HARD_REG_BIT (data
->profitable_hard_regs
,
1121 if (ALLOCNO_UPDATED_MEMORY_COST (a
) < costs
[j
])
1122 CLEAR_HARD_REG_BIT (data
->profitable_hard_regs
,
1124 else if (min_cost
> costs
[j
])
1125 min_cost
= costs
[j
];
1128 else if (ALLOCNO_UPDATED_MEMORY_COST (a
)
1129 < ALLOCNO_UPDATED_CLASS_COST (a
))
1130 CLEAR_HARD_REG_SET (data
->profitable_hard_regs
);
1131 if (ALLOCNO_UPDATED_CLASS_COST (a
) > min_cost
)
1132 ALLOCNO_UPDATED_CLASS_COST (a
) = min_cost
;
1138 /* This page contains functions used to choose hard registers for
1141 /* Pool for update cost records. */
1142 static alloc_pool update_cost_record_pool
;
1144 /* Initiate update cost records. */
1146 init_update_cost_records (void)
1148 update_cost_record_pool
1149 = create_alloc_pool ("update cost records",
1150 sizeof (struct update_cost_record
), 100);
1153 /* Return new update cost record with given params. */
1154 static struct update_cost_record
*
1155 get_update_cost_record (int hard_regno
, int divisor
,
1156 struct update_cost_record
*next
)
1158 struct update_cost_record
*record
;
1160 record
= (struct update_cost_record
*) pool_alloc (update_cost_record_pool
);
1161 record
->hard_regno
= hard_regno
;
1162 record
->divisor
= divisor
;
1163 record
->next
= next
;
1167 /* Free memory for all records in LIST. */
1169 free_update_cost_record_list (struct update_cost_record
*list
)
1171 struct update_cost_record
*next
;
1173 while (list
!= NULL
)
1176 pool_free (update_cost_record_pool
, list
);
1181 /* Free memory allocated for all update cost records. */
1183 finish_update_cost_records (void)
1185 free_alloc_pool (update_cost_record_pool
);
1188 /* Array whose element value is TRUE if the corresponding hard
1189 register was already allocated for an allocno. */
1190 static bool allocated_hardreg_p
[FIRST_PSEUDO_REGISTER
];
1192 /* Describes one element in a queue of allocnos whose costs need to be
1193 updated. Each allocno in the queue is known to have an allocno
1195 struct update_cost_queue_elem
1197 /* This element is in the queue iff CHECK == update_cost_check. */
1200 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1201 connecting this allocno to the one being allocated. */
1204 /* Allocno from which we are chaning costs of connected allocnos.
1205 It is used not go back in graph of allocnos connected by
1209 /* The next allocno in the queue, or null if this is the last element. */
1213 /* The first element in a queue of allocnos whose copy costs need to be
1214 updated. Null if the queue is empty. */
1215 static ira_allocno_t update_cost_queue
;
1217 /* The last element in the queue described by update_cost_queue.
1218 Not valid if update_cost_queue is null. */
1219 static struct update_cost_queue_elem
*update_cost_queue_tail
;
1221 /* A pool of elements in the queue described by update_cost_queue.
1222 Elements are indexed by ALLOCNO_NUM. */
1223 static struct update_cost_queue_elem
*update_cost_queue_elems
;
1225 /* The current value of update_costs_from_copies call count. */
1226 static int update_cost_check
;
1228 /* Allocate and initialize data necessary for function
1229 update_costs_from_copies. */
1231 initiate_cost_update (void)
1235 size
= ira_allocnos_num
* sizeof (struct update_cost_queue_elem
);
1236 update_cost_queue_elems
1237 = (struct update_cost_queue_elem
*) ira_allocate (size
);
1238 memset (update_cost_queue_elems
, 0, size
);
1239 update_cost_check
= 0;
1240 init_update_cost_records ();
1243 /* Deallocate data used by function update_costs_from_copies. */
1245 finish_cost_update (void)
1247 ira_free (update_cost_queue_elems
);
1248 finish_update_cost_records ();
1251 /* When we traverse allocnos to update hard register costs, the cost
1252 divisor will be multiplied by the following macro value for each
1253 hop from given allocno to directly connected allocnos. */
1254 #define COST_HOP_DIVISOR 4
1256 /* Start a new cost-updating pass. */
1258 start_update_cost (void)
1260 update_cost_check
++;
1261 update_cost_queue
= NULL
;
1264 /* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1265 ALLOCNO is already in the queue, or has NO_REGS class. */
1267 queue_update_cost (ira_allocno_t allocno
, ira_allocno_t from
, int divisor
)
1269 struct update_cost_queue_elem
*elem
;
1271 elem
= &update_cost_queue_elems
[ALLOCNO_NUM (allocno
)];
1272 if (elem
->check
!= update_cost_check
1273 && ALLOCNO_CLASS (allocno
) != NO_REGS
)
1275 elem
->check
= update_cost_check
;
1277 elem
->divisor
= divisor
;
1279 if (update_cost_queue
== NULL
)
1280 update_cost_queue
= allocno
;
1282 update_cost_queue_tail
->next
= allocno
;
1283 update_cost_queue_tail
= elem
;
1287 /* Try to remove the first element from update_cost_queue. Return
1288 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1289 *DIVISOR) describe the removed element. */
1291 get_next_update_cost (ira_allocno_t
*allocno
, ira_allocno_t
*from
, int *divisor
)
1293 struct update_cost_queue_elem
*elem
;
1295 if (update_cost_queue
== NULL
)
1298 *allocno
= update_cost_queue
;
1299 elem
= &update_cost_queue_elems
[ALLOCNO_NUM (*allocno
)];
1301 *divisor
= elem
->divisor
;
1302 update_cost_queue
= elem
->next
;
1306 /* Increase costs of HARD_REGNO by UPDATE_COST for ALLOCNO. Return
1307 true if we really modified the cost. */
1309 update_allocno_cost (ira_allocno_t allocno
, int hard_regno
, int update_cost
)
1312 enum reg_class aclass
= ALLOCNO_CLASS (allocno
);
1314 i
= ira_class_hard_reg_index
[aclass
][hard_regno
];
1317 ira_allocate_and_set_or_copy_costs
1318 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno
), aclass
,
1319 ALLOCNO_UPDATED_CLASS_COST (allocno
),
1320 ALLOCNO_HARD_REG_COSTS (allocno
));
1321 ira_allocate_and_set_or_copy_costs
1322 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno
),
1323 aclass
, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno
));
1324 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno
)[i
] += update_cost
;
1325 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno
)[i
] += update_cost
;
1329 /* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
1330 by copies to ALLOCNO to increase chances to remove some copies as
1331 the result of subsequent assignment. Record cost updates if
1332 RECORD_P is true. */
1334 update_costs_from_allocno (ira_allocno_t allocno
, int hard_regno
,
1335 int divisor
, bool decr_p
, bool record_p
)
1337 int cost
, update_cost
;
1338 enum machine_mode mode
;
1339 enum reg_class rclass
, aclass
;
1340 ira_allocno_t another_allocno
, from
= NULL
;
1341 ira_copy_t cp
, next_cp
;
1343 rclass
= REGNO_REG_CLASS (hard_regno
);
1346 mode
= ALLOCNO_MODE (allocno
);
1347 ira_init_register_move_cost_if_necessary (mode
);
1348 for (cp
= ALLOCNO_COPIES (allocno
); cp
!= NULL
; cp
= next_cp
)
1350 if (cp
->first
== allocno
)
1352 next_cp
= cp
->next_first_allocno_copy
;
1353 another_allocno
= cp
->second
;
1355 else if (cp
->second
== allocno
)
1357 next_cp
= cp
->next_second_allocno_copy
;
1358 another_allocno
= cp
->first
;
1363 if (another_allocno
== from
)
1366 aclass
= ALLOCNO_CLASS (another_allocno
);
1367 if (! TEST_HARD_REG_BIT (reg_class_contents
[aclass
],
1369 || ALLOCNO_ASSIGNED_P (another_allocno
))
1372 cost
= (cp
->second
== allocno
1373 ? ira_register_move_cost
[mode
][rclass
][aclass
]
1374 : ira_register_move_cost
[mode
][aclass
][rclass
]);
1378 update_cost
= cp
->freq
* cost
/ divisor
;
1379 if (update_cost
== 0)
1382 if (! update_allocno_cost (another_allocno
, hard_regno
, update_cost
))
1384 queue_update_cost (another_allocno
, allocno
, divisor
* COST_HOP_DIVISOR
);
1385 if (record_p
&& ALLOCNO_COLOR_DATA (another_allocno
) != NULL
)
1386 ALLOCNO_COLOR_DATA (another_allocno
)->update_cost_records
1387 = get_update_cost_record (hard_regno
, divisor
,
1388 ALLOCNO_COLOR_DATA (another_allocno
)
1389 ->update_cost_records
);
1392 while (get_next_update_cost (&allocno
, &from
, &divisor
));
1395 /* Decrease preferred ALLOCNO hard register costs and costs of
1396 allocnos connected to ALLOCNO through copy. */
1398 update_costs_from_prefs (ira_allocno_t allocno
)
1402 start_update_cost ();
1403 for (pref
= ALLOCNO_PREFS (allocno
); pref
!= NULL
; pref
= pref
->next_pref
)
1404 update_costs_from_allocno (allocno
, pref
->hard_regno
,
1405 COST_HOP_DIVISOR
, true, true);
1408 /* Update (decrease if DECR_P) the cost of allocnos connected to
1409 ALLOCNO through copies to increase chances to remove some copies as
1410 the result of subsequent assignment. ALLOCNO was just assigned to
1411 a hard register. Record cost updates if RECORD_P is true. */
1413 update_costs_from_copies (ira_allocno_t allocno
, bool decr_p
, bool record_p
)
1417 hard_regno
= ALLOCNO_HARD_REGNO (allocno
);
1418 ira_assert (hard_regno
>= 0 && ALLOCNO_CLASS (allocno
) != NO_REGS
);
1419 start_update_cost ();
1420 update_costs_from_allocno (allocno
, hard_regno
, 1, decr_p
, record_p
);
1423 /* Restore costs of allocnos connected to ALLOCNO by copies as it was
1424 before updating costs of these allocnos from given allocno. This
1425 is a wise thing to do as if given allocno did not get an expected
1426 hard reg, using smaller cost of the hard reg for allocnos connected
1427 by copies to given allocno becomes actually misleading. Free all
1428 update cost records for ALLOCNO as we don't need them anymore. */
1430 restore_costs_from_copies (ira_allocno_t allocno
)
1432 struct update_cost_record
*records
, *curr
;
1434 if (ALLOCNO_COLOR_DATA (allocno
) == NULL
)
1436 records
= ALLOCNO_COLOR_DATA (allocno
)->update_cost_records
;
1437 start_update_cost ();
1438 for (curr
= records
; curr
!= NULL
; curr
= curr
->next
)
1439 update_costs_from_allocno (allocno
, curr
->hard_regno
,
1440 curr
->divisor
, true, false);
1441 free_update_cost_record_list (records
);
1442 ALLOCNO_COLOR_DATA (allocno
)->update_cost_records
= NULL
;
1445 /* This function updates COSTS (decrease if DECR_P) for hard_registers
1446 of ACLASS by conflict costs of the unassigned allocnos
1447 connected by copies with allocnos in update_cost_queue. This
1448 update increases chances to remove some copies. */
1450 update_conflict_hard_regno_costs (int *costs
, enum reg_class aclass
,
1453 int i
, cost
, class_size
, freq
, mult
, div
, divisor
;
1454 int index
, hard_regno
;
1455 int *conflict_costs
;
1457 enum reg_class another_aclass
;
1458 ira_allocno_t allocno
, another_allocno
, from
;
1459 ira_copy_t cp
, next_cp
;
1461 while (get_next_update_cost (&allocno
, &from
, &divisor
))
1462 for (cp
= ALLOCNO_COPIES (allocno
); cp
!= NULL
; cp
= next_cp
)
1464 if (cp
->first
== allocno
)
1466 next_cp
= cp
->next_first_allocno_copy
;
1467 another_allocno
= cp
->second
;
1469 else if (cp
->second
== allocno
)
1471 next_cp
= cp
->next_second_allocno_copy
;
1472 another_allocno
= cp
->first
;
1477 if (another_allocno
== from
)
1480 another_aclass
= ALLOCNO_CLASS (another_allocno
);
1481 if (! ira_reg_classes_intersect_p
[aclass
][another_aclass
]
1482 || ALLOCNO_ASSIGNED_P (another_allocno
)
1483 || ALLOCNO_COLOR_DATA (another_allocno
)->may_be_spilled_p
)
1485 class_size
= ira_class_hard_regs_num
[another_aclass
];
1486 ira_allocate_and_copy_costs
1487 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno
),
1488 another_aclass
, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno
));
1490 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno
);
1491 if (conflict_costs
== NULL
)
1496 freq
= ALLOCNO_FREQ (another_allocno
);
1499 div
= freq
* divisor
;
1501 for (i
= class_size
- 1; i
>= 0; i
--)
1503 hard_regno
= ira_class_hard_regs
[another_aclass
][i
];
1504 ira_assert (hard_regno
>= 0);
1505 index
= ira_class_hard_reg_index
[aclass
][hard_regno
];
1508 cost
= (int) ((unsigned) conflict_costs
[i
] * mult
) / div
;
1514 costs
[index
] += cost
;
1517 /* Probably 5 hops will be enough. */
1519 && divisor
<= (COST_HOP_DIVISOR
1522 * COST_HOP_DIVISOR
))
1523 queue_update_cost (another_allocno
, allocno
, divisor
* COST_HOP_DIVISOR
);
1527 /* Set up conflicting (through CONFLICT_REGS) for each object of
1528 allocno A and the start allocno profitable regs (through
1529 START_PROFITABLE_REGS). Remember that the start profitable regs
1530 exclude hard regs which can not hold value of mode of allocno A.
1531 This covers mostly cases when multi-register value should be
1534 get_conflict_and_start_profitable_regs (ira_allocno_t a
, bool retry_p
,
1535 HARD_REG_SET
*conflict_regs
,
1536 HARD_REG_SET
*start_profitable_regs
)
1541 nwords
= ALLOCNO_NUM_OBJECTS (a
);
1542 for (i
= 0; i
< nwords
; i
++)
1544 obj
= ALLOCNO_OBJECT (a
, i
);
1545 COPY_HARD_REG_SET (conflict_regs
[i
],
1546 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
1550 COPY_HARD_REG_SET (*start_profitable_regs
,
1551 reg_class_contents
[ALLOCNO_CLASS (a
)]);
1552 AND_COMPL_HARD_REG_SET (*start_profitable_regs
,
1553 ira_prohibited_class_mode_regs
1554 [ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]);
1557 COPY_HARD_REG_SET (*start_profitable_regs
,
1558 ALLOCNO_COLOR_DATA (a
)->profitable_hard_regs
);
1561 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1562 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1564 check_hard_reg_p (ira_allocno_t a
, int hard_regno
,
1565 HARD_REG_SET
*conflict_regs
, HARD_REG_SET profitable_regs
)
1567 int j
, nwords
, nregs
;
1568 enum reg_class aclass
;
1569 enum machine_mode mode
;
1571 aclass
= ALLOCNO_CLASS (a
);
1572 mode
= ALLOCNO_MODE (a
);
1573 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs
[aclass
][mode
],
1576 /* Checking only profitable hard regs. */
1577 if (! TEST_HARD_REG_BIT (profitable_regs
, hard_regno
))
1579 nregs
= hard_regno_nregs
[hard_regno
][mode
];
1580 nwords
= ALLOCNO_NUM_OBJECTS (a
);
1581 for (j
= 0; j
< nregs
; j
++)
1584 int set_to_test_start
= 0, set_to_test_end
= nwords
;
1586 if (nregs
== nwords
)
1588 if (REG_WORDS_BIG_ENDIAN
)
1589 set_to_test_start
= nwords
- j
- 1;
1591 set_to_test_start
= j
;
1592 set_to_test_end
= set_to_test_start
+ 1;
1594 for (k
= set_to_test_start
; k
< set_to_test_end
; k
++)
1595 if (TEST_HARD_REG_BIT (conflict_regs
[k
], hard_regno
+ j
))
1597 if (k
!= set_to_test_end
)
1603 /* Return number of registers needed to be saved and restored at
1604 function prologue/epilogue if we allocate HARD_REGNO to hold value
1607 calculate_saved_nregs (int hard_regno
, enum machine_mode mode
)
1612 ira_assert (hard_regno
>= 0);
1613 for (i
= hard_regno_nregs
[hard_regno
][mode
] - 1; i
>= 0; i
--)
1614 if (!allocated_hardreg_p
[hard_regno
+ i
]
1615 && !TEST_HARD_REG_BIT (call_used_reg_set
, hard_regno
+ i
)
1616 && !LOCAL_REGNO (hard_regno
+ i
))
1621 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1622 that the function called from function
1623 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1624 this case some allocno data are not defined or updated and we
1625 should not touch these data. The function returns true if we
1626 managed to assign a hard register to the allocno.
1628 To assign a hard register, first of all we calculate all conflict
1629 hard registers which can come from conflicting allocnos with
1630 already assigned hard registers. After that we find first free
1631 hard register with the minimal cost. During hard register cost
1632 calculation we take conflict hard register costs into account to
1633 give a chance for conflicting allocnos to get a better hard
1634 register in the future.
1636 If the best hard register cost is bigger than cost of memory usage
1637 for the allocno, we don't assign a hard register to given allocno
1640 If we assign a hard register to the allocno, we update costs of the
1641 hard register for allocnos connected by copies to improve a chance
1642 to coalesce insns represented by the copies when we assign hard
1643 registers to the allocnos connected by the copies. */
1645 assign_hard_reg (ira_allocno_t a
, bool retry_p
)
1647 HARD_REG_SET conflicting_regs
[2], profitable_hard_regs
;
1648 int i
, j
, hard_regno
, best_hard_regno
, class_size
;
1649 int cost
, mem_cost
, min_cost
, full_cost
, min_full_cost
, nwords
, word
;
1651 enum reg_class aclass
;
1652 enum machine_mode mode
;
1653 static int costs
[FIRST_PSEUDO_REGISTER
], full_costs
[FIRST_PSEUDO_REGISTER
];
1655 enum reg_class rclass
;
1658 bool no_stack_reg_p
;
1661 ira_assert (! ALLOCNO_ASSIGNED_P (a
));
1662 get_conflict_and_start_profitable_regs (a
, retry_p
,
1664 &profitable_hard_regs
);
1665 aclass
= ALLOCNO_CLASS (a
);
1666 class_size
= ira_class_hard_regs_num
[aclass
];
1667 best_hard_regno
= -1;
1668 memset (full_costs
, 0, sizeof (int) * class_size
);
1670 memset (costs
, 0, sizeof (int) * class_size
);
1671 memset (full_costs
, 0, sizeof (int) * class_size
);
1673 no_stack_reg_p
= false;
1676 start_update_cost ();
1677 mem_cost
+= ALLOCNO_UPDATED_MEMORY_COST (a
);
1679 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a
),
1680 aclass
, ALLOCNO_HARD_REG_COSTS (a
));
1681 a_costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (a
);
1683 no_stack_reg_p
= no_stack_reg_p
|| ALLOCNO_TOTAL_NO_STACK_REG_P (a
);
1685 cost
= ALLOCNO_UPDATED_CLASS_COST (a
);
1686 for (i
= 0; i
< class_size
; i
++)
1687 if (a_costs
!= NULL
)
1689 costs
[i
] += a_costs
[i
];
1690 full_costs
[i
] += a_costs
[i
];
1695 full_costs
[i
] += cost
;
1697 nwords
= ALLOCNO_NUM_OBJECTS (a
);
1698 curr_allocno_process
++;
1699 for (word
= 0; word
< nwords
; word
++)
1701 ira_object_t conflict_obj
;
1702 ira_object_t obj
= ALLOCNO_OBJECT (a
, word
);
1703 ira_object_conflict_iterator oci
;
1705 /* Take preferences of conflicting allocnos into account. */
1706 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
1708 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
1709 enum reg_class conflict_aclass
;
1711 /* Reload can give another class so we need to check all
1714 && (!bitmap_bit_p (consideration_allocno_bitmap
,
1715 ALLOCNO_NUM (conflict_a
))
1716 || ((!ALLOCNO_ASSIGNED_P (conflict_a
)
1717 || ALLOCNO_HARD_REGNO (conflict_a
) < 0)
1718 && !(hard_reg_set_intersect_p
1719 (profitable_hard_regs
,
1721 (conflict_a
)->profitable_hard_regs
)))))
1723 conflict_aclass
= ALLOCNO_CLASS (conflict_a
);
1724 ira_assert (ira_reg_classes_intersect_p
1725 [aclass
][conflict_aclass
]);
1726 if (ALLOCNO_ASSIGNED_P (conflict_a
))
1728 hard_regno
= ALLOCNO_HARD_REGNO (conflict_a
);
1730 && (ira_hard_reg_set_intersection_p
1731 (hard_regno
, ALLOCNO_MODE (conflict_a
),
1732 reg_class_contents
[aclass
])))
1734 int n_objects
= ALLOCNO_NUM_OBJECTS (conflict_a
);
1737 mode
= ALLOCNO_MODE (conflict_a
);
1738 conflict_nregs
= hard_regno_nregs
[hard_regno
][mode
];
1739 if (conflict_nregs
== n_objects
&& conflict_nregs
> 1)
1741 int num
= OBJECT_SUBWORD (conflict_obj
);
1743 if (REG_WORDS_BIG_ENDIAN
)
1744 SET_HARD_REG_BIT (conflicting_regs
[word
],
1745 hard_regno
+ n_objects
- num
- 1);
1747 SET_HARD_REG_BIT (conflicting_regs
[word
],
1752 (conflicting_regs
[word
],
1753 ira_reg_mode_hard_regset
[hard_regno
][mode
]);
1754 if (hard_reg_set_subset_p (profitable_hard_regs
,
1755 conflicting_regs
[word
]))
1760 && ! ALLOCNO_COLOR_DATA (conflict_a
)->may_be_spilled_p
1761 /* Don't process the conflict allocno twice. */
1762 && (ALLOCNO_COLOR_DATA (conflict_a
)->last_process
1763 != curr_allocno_process
))
1765 int k
, *conflict_costs
;
1767 ALLOCNO_COLOR_DATA (conflict_a
)->last_process
1768 = curr_allocno_process
;
1769 ira_allocate_and_copy_costs
1770 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a
),
1772 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a
));
1774 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a
);
1775 if (conflict_costs
!= NULL
)
1776 for (j
= class_size
- 1; j
>= 0; j
--)
1778 hard_regno
= ira_class_hard_regs
[aclass
][j
];
1779 ira_assert (hard_regno
>= 0);
1780 k
= ira_class_hard_reg_index
[conflict_aclass
][hard_regno
];
1783 full_costs
[j
] -= conflict_costs
[k
];
1785 queue_update_cost (conflict_a
, NULL
, COST_HOP_DIVISOR
);
1791 /* Take into account preferences of allocnos connected by copies to
1792 the conflict allocnos. */
1793 update_conflict_hard_regno_costs (full_costs
, aclass
, true);
1795 /* Take preferences of allocnos connected by copies into
1799 start_update_cost ();
1800 queue_update_cost (a
, NULL
, COST_HOP_DIVISOR
);
1801 update_conflict_hard_regno_costs (full_costs
, aclass
, false);
1803 min_cost
= min_full_cost
= INT_MAX
;
1804 /* We don't care about giving callee saved registers to allocnos no
1805 living through calls because call clobbered registers are
1806 allocated first (it is usual practice to put them first in
1807 REG_ALLOC_ORDER). */
1808 mode
= ALLOCNO_MODE (a
);
1809 for (i
= 0; i
< class_size
; i
++)
1811 hard_regno
= ira_class_hard_regs
[aclass
][i
];
1814 && FIRST_STACK_REG
<= hard_regno
&& hard_regno
<= LAST_STACK_REG
)
1817 if (! check_hard_reg_p (a
, hard_regno
,
1818 conflicting_regs
, profitable_hard_regs
))
1821 full_cost
= full_costs
[i
];
1822 if (!HONOR_REG_ALLOC_ORDER
)
1824 if ((saved_nregs
= calculate_saved_nregs (hard_regno
, mode
)) != 0)
1825 /* We need to save/restore the hard register in
1826 epilogue/prologue. Therefore we increase the cost. */
1828 rclass
= REGNO_REG_CLASS (hard_regno
);
1829 add_cost
= ((ira_memory_move_cost
[mode
][rclass
][0]
1830 + ira_memory_move_cost
[mode
][rclass
][1])
1831 * saved_nregs
/ hard_regno_nregs
[hard_regno
][mode
] - 1);
1833 full_cost
+= add_cost
;
1836 if (min_cost
> cost
)
1838 if (min_full_cost
> full_cost
)
1840 min_full_cost
= full_cost
;
1841 best_hard_regno
= hard_regno
;
1842 ira_assert (hard_regno
>= 0);
1845 if (min_full_cost
> mem_cost
)
1847 if (! retry_p
&& internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
1848 fprintf (ira_dump_file
, "(memory is more profitable %d vs %d) ",
1849 mem_cost
, min_full_cost
);
1850 best_hard_regno
= -1;
1853 if (best_hard_regno
>= 0)
1855 for (i
= hard_regno_nregs
[best_hard_regno
][mode
] - 1; i
>= 0; i
--)
1856 allocated_hardreg_p
[best_hard_regno
+ i
] = true;
1859 restore_costs_from_copies (a
);
1860 ALLOCNO_HARD_REGNO (a
) = best_hard_regno
;
1861 ALLOCNO_ASSIGNED_P (a
) = true;
1862 if (best_hard_regno
>= 0)
1863 update_costs_from_copies (a
, true, ! retry_p
);
1864 ira_assert (ALLOCNO_CLASS (a
) == aclass
);
1865 /* We don't need updated costs anymore: */
1866 ira_free_allocno_updated_costs (a
);
1867 return best_hard_regno
>= 0;
1872 /* An array used to sort copies. */
1873 static ira_copy_t
*sorted_copies
;
1875 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1876 used to find a conflict for new allocnos or allocnos with the
1877 different allocno classes. */
1879 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1
, ira_allocno_t a2
)
1883 int n1
= ALLOCNO_NUM_OBJECTS (a1
);
1884 int n2
= ALLOCNO_NUM_OBJECTS (a2
);
1888 reg1
= regno_reg_rtx
[ALLOCNO_REGNO (a1
)];
1889 reg2
= regno_reg_rtx
[ALLOCNO_REGNO (a2
)];
1890 if (reg1
!= NULL
&& reg2
!= NULL
1891 && ORIGINAL_REGNO (reg1
) == ORIGINAL_REGNO (reg2
))
1894 for (i
= 0; i
< n1
; i
++)
1896 ira_object_t c1
= ALLOCNO_OBJECT (a1
, i
);
1898 for (j
= 0; j
< n2
; j
++)
1900 ira_object_t c2
= ALLOCNO_OBJECT (a2
, j
);
1902 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1
),
1903 OBJECT_LIVE_RANGES (c2
)))
1910 /* The function is used to sort copies according to their execution
1913 copy_freq_compare_func (const void *v1p
, const void *v2p
)
1915 ira_copy_t cp1
= *(const ira_copy_t
*) v1p
, cp2
= *(const ira_copy_t
*) v2p
;
1923 /* If freqencies are equal, sort by copies, so that the results of
1924 qsort leave nothing to chance. */
1925 return cp1
->num
- cp2
->num
;
1930 /* Return true if any allocno from thread of A1 conflicts with any
1931 allocno from thread A2. */
1933 allocno_thread_conflict_p (ira_allocno_t a1
, ira_allocno_t a2
)
1935 ira_allocno_t a
, conflict_a
;
1937 for (a
= ALLOCNO_COLOR_DATA (a2
)->next_thread_allocno
;;
1938 a
= ALLOCNO_COLOR_DATA (a
)->next_thread_allocno
)
1940 for (conflict_a
= ALLOCNO_COLOR_DATA (a1
)->next_thread_allocno
;;
1941 conflict_a
= ALLOCNO_COLOR_DATA (conflict_a
)->next_thread_allocno
)
1943 if (allocnos_conflict_by_live_ranges_p (a
, conflict_a
))
1945 if (conflict_a
== a1
)
1954 /* Merge two threads given correspondingly by their first allocnos T1
1955 and T2 (more accurately merging T2 into T1). */
1957 merge_threads (ira_allocno_t t1
, ira_allocno_t t2
)
1959 ira_allocno_t a
, next
, last
;
1961 gcc_assert (t1
!= t2
1962 && ALLOCNO_COLOR_DATA (t1
)->first_thread_allocno
== t1
1963 && ALLOCNO_COLOR_DATA (t2
)->first_thread_allocno
== t2
);
1964 for (last
= t2
, a
= ALLOCNO_COLOR_DATA (t2
)->next_thread_allocno
;;
1965 a
= ALLOCNO_COLOR_DATA (a
)->next_thread_allocno
)
1967 ALLOCNO_COLOR_DATA (a
)->first_thread_allocno
= t1
;
1972 next
= ALLOCNO_COLOR_DATA (t1
)->next_thread_allocno
;
1973 ALLOCNO_COLOR_DATA (t1
)->next_thread_allocno
= t2
;
1974 ALLOCNO_COLOR_DATA (last
)->next_thread_allocno
= next
;
1975 ALLOCNO_COLOR_DATA (t1
)->thread_freq
+= ALLOCNO_COLOR_DATA (t2
)->thread_freq
;
1978 /* Create threads by processing CP_NUM copies from sorted)ciopeis. We
1979 process the most expensive copies first. */
1981 form_threads_from_copies (int cp_num
)
1983 ira_allocno_t a
, thread1
, thread2
;
1987 qsort (sorted_copies
, cp_num
, sizeof (ira_copy_t
), copy_freq_compare_func
);
1988 /* Form threads processing copies, most frequently executed
1990 for (; cp_num
!= 0;)
1992 for (i
= 0; i
< cp_num
; i
++)
1994 cp
= sorted_copies
[i
];
1995 thread1
= ALLOCNO_COLOR_DATA (cp
->first
)->first_thread_allocno
;
1996 thread2
= ALLOCNO_COLOR_DATA (cp
->second
)->first_thread_allocno
;
1997 if (thread1
== thread2
)
1999 if (! allocno_thread_conflict_p (thread1
, thread2
))
2001 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2004 " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2005 cp
->num
, ALLOCNO_NUM (cp
->first
), ALLOCNO_REGNO (cp
->first
),
2006 ALLOCNO_NUM (cp
->second
), ALLOCNO_REGNO (cp
->second
),
2008 merge_threads (thread1
, thread2
);
2009 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2011 thread1
= ALLOCNO_COLOR_DATA (thread1
)->first_thread_allocno
;
2012 fprintf (ira_dump_file
, " Result (freq=%d): a%dr%d(%d)",
2013 ALLOCNO_COLOR_DATA (thread1
)->thread_freq
,
2014 ALLOCNO_NUM (thread1
), ALLOCNO_REGNO (thread1
),
2015 ALLOCNO_FREQ (thread1
));
2016 for (a
= ALLOCNO_COLOR_DATA (thread1
)->next_thread_allocno
;
2018 a
= ALLOCNO_COLOR_DATA (a
)->next_thread_allocno
)
2019 fprintf (ira_dump_file
, " a%dr%d(%d)",
2020 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
),
2022 fprintf (ira_dump_file
, "\n");
2028 /* Collect the rest of copies. */
2029 for (n
= 0; i
< cp_num
; i
++)
2031 cp
= sorted_copies
[i
];
2032 if (ALLOCNO_COLOR_DATA (cp
->first
)->first_thread_allocno
2033 != ALLOCNO_COLOR_DATA (cp
->second
)->first_thread_allocno
)
2034 sorted_copies
[n
++] = cp
;
2040 /* Create threads by processing copies of all alocnos from BUCKET. We
2041 process the most expensive copies first. */
2043 form_threads_from_bucket (ira_allocno_t bucket
)
2046 ira_copy_t cp
, next_cp
;
2049 for (a
= bucket
; a
!= NULL
; a
= ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
)
2051 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
2055 next_cp
= cp
->next_first_allocno_copy
;
2056 sorted_copies
[cp_num
++] = cp
;
2058 else if (cp
->second
== a
)
2059 next_cp
= cp
->next_second_allocno_copy
;
2064 form_threads_from_copies (cp_num
);
2067 /* Create threads by processing copies of colorable allocno A. We
2068 process most expensive copies first. */
2070 form_threads_from_colorable_allocno (ira_allocno_t a
)
2072 ira_allocno_t another_a
;
2073 ira_copy_t cp
, next_cp
;
2076 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
2080 next_cp
= cp
->next_first_allocno_copy
;
2081 another_a
= cp
->second
;
2083 else if (cp
->second
== a
)
2085 next_cp
= cp
->next_second_allocno_copy
;
2086 another_a
= cp
->first
;
2090 if ((! ALLOCNO_COLOR_DATA (another_a
)->in_graph_p
2091 && !ALLOCNO_COLOR_DATA (another_a
)->may_be_spilled_p
)
2092 || ALLOCNO_COLOR_DATA (another_a
)->colorable_p
)
2093 sorted_copies
[cp_num
++] = cp
;
2095 form_threads_from_copies (cp_num
);
2098 /* Form initial threads which contain only one allocno. */
2100 init_allocno_threads (void)
2106 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
2108 a
= ira_allocnos
[j
];
2109 /* Set up initial thread data: */
2110 ALLOCNO_COLOR_DATA (a
)->first_thread_allocno
2111 = ALLOCNO_COLOR_DATA (a
)->next_thread_allocno
= a
;
2112 ALLOCNO_COLOR_DATA (a
)->thread_freq
= ALLOCNO_FREQ (a
);
2118 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
2120 /* Bucket of allocnos that can colored currently without spilling. */
2121 static ira_allocno_t colorable_allocno_bucket
;
2123 /* Bucket of allocnos that might be not colored currently without
2125 static ira_allocno_t uncolorable_allocno_bucket
;
2127 /* The current number of allocnos in the uncolorable_bucket. */
2128 static int uncolorable_allocnos_num
;
2130 /* Return the current spill priority of allocno A. The less the
2131 number, the more preferable the allocno for spilling. */
2133 allocno_spill_priority (ira_allocno_t a
)
2135 allocno_color_data_t data
= ALLOCNO_COLOR_DATA (a
);
2138 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
)
2139 * ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]
2143 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
2146 add_allocno_to_bucket (ira_allocno_t a
, ira_allocno_t
*bucket_ptr
)
2148 ira_allocno_t first_a
;
2149 allocno_color_data_t data
;
2151 if (bucket_ptr
== &uncolorable_allocno_bucket
2152 && ALLOCNO_CLASS (a
) != NO_REGS
)
2154 uncolorable_allocnos_num
++;
2155 ira_assert (uncolorable_allocnos_num
> 0);
2157 first_a
= *bucket_ptr
;
2158 data
= ALLOCNO_COLOR_DATA (a
);
2159 data
->next_bucket_allocno
= first_a
;
2160 data
->prev_bucket_allocno
= NULL
;
2161 if (first_a
!= NULL
)
2162 ALLOCNO_COLOR_DATA (first_a
)->prev_bucket_allocno
= a
;
2166 /* Compare two allocnos to define which allocno should be pushed first
2167 into the coloring stack. If the return is a negative number, the
2168 allocno given by the first parameter will be pushed first. In this
2169 case such allocno has less priority than the second one and the
2170 hard register will be assigned to it after assignment to the second
2171 one. As the result of such assignment order, the second allocno
2172 has a better chance to get the best hard register. */
2174 bucket_allocno_compare_func (const void *v1p
, const void *v2p
)
2176 ira_allocno_t a1
= *(const ira_allocno_t
*) v1p
;
2177 ira_allocno_t a2
= *(const ira_allocno_t
*) v2p
;
2178 int diff
, freq1
, freq2
, a1_num
, a2_num
;
2179 ira_allocno_t t1
= ALLOCNO_COLOR_DATA (a1
)->first_thread_allocno
;
2180 ira_allocno_t t2
= ALLOCNO_COLOR_DATA (a2
)->first_thread_allocno
;
2181 int cl1
= ALLOCNO_CLASS (a1
), cl2
= ALLOCNO_CLASS (a2
);
2183 freq1
= ALLOCNO_COLOR_DATA (t1
)->thread_freq
;
2184 freq2
= ALLOCNO_COLOR_DATA (t2
)->thread_freq
;
2185 if ((diff
= freq1
- freq2
) != 0)
2188 if ((diff
= ALLOCNO_NUM (t2
) - ALLOCNO_NUM (t1
)) != 0)
2191 /* Push pseudos requiring less hard registers first. It means that
2192 we will assign pseudos requiring more hard registers first
2193 avoiding creation small holes in free hard register file into
2194 which the pseudos requiring more hard registers can not fit. */
2195 if ((diff
= (ira_reg_class_max_nregs
[cl1
][ALLOCNO_MODE (a1
)]
2196 - ira_reg_class_max_nregs
[cl2
][ALLOCNO_MODE (a2
)])) != 0)
2199 freq1
= ALLOCNO_FREQ (a1
);
2200 freq2
= ALLOCNO_FREQ (a2
);
2201 if ((diff
= freq1
- freq2
) != 0)
2204 a1_num
= ALLOCNO_COLOR_DATA (a1
)->available_regs_num
;
2205 a2_num
= ALLOCNO_COLOR_DATA (a2
)->available_regs_num
;
2206 if ((diff
= a2_num
- a1_num
) != 0)
2208 return ALLOCNO_NUM (a2
) - ALLOCNO_NUM (a1
);
2211 /* Sort bucket *BUCKET_PTR and return the result through
2214 sort_bucket (ira_allocno_t
*bucket_ptr
,
2215 int (*compare_func
) (const void *, const void *))
2217 ira_allocno_t a
, head
;
2220 for (n
= 0, a
= *bucket_ptr
;
2222 a
= ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
)
2223 sorted_allocnos
[n
++] = a
;
2226 qsort (sorted_allocnos
, n
, sizeof (ira_allocno_t
), compare_func
);
2228 for (n
--; n
>= 0; n
--)
2230 a
= sorted_allocnos
[n
];
2231 ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
= head
;
2232 ALLOCNO_COLOR_DATA (a
)->prev_bucket_allocno
= NULL
;
2234 ALLOCNO_COLOR_DATA (head
)->prev_bucket_allocno
= a
;
2240 /* Add ALLOCNO to colorable bucket maintaining the order according
2241 their priority. ALLOCNO should be not in a bucket before the
2244 add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno
)
2246 ira_allocno_t before
, after
;
2248 form_threads_from_colorable_allocno (allocno
);
2249 for (before
= colorable_allocno_bucket
, after
= NULL
;
2252 before
= ALLOCNO_COLOR_DATA (before
)->next_bucket_allocno
)
2253 if (bucket_allocno_compare_func (&allocno
, &before
) < 0)
2255 ALLOCNO_COLOR_DATA (allocno
)->next_bucket_allocno
= before
;
2256 ALLOCNO_COLOR_DATA (allocno
)->prev_bucket_allocno
= after
;
2258 colorable_allocno_bucket
= allocno
;
2260 ALLOCNO_COLOR_DATA (after
)->next_bucket_allocno
= allocno
;
2262 ALLOCNO_COLOR_DATA (before
)->prev_bucket_allocno
= allocno
;
2265 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
2268 delete_allocno_from_bucket (ira_allocno_t allocno
, ira_allocno_t
*bucket_ptr
)
2270 ira_allocno_t prev_allocno
, next_allocno
;
2272 if (bucket_ptr
== &uncolorable_allocno_bucket
2273 && ALLOCNO_CLASS (allocno
) != NO_REGS
)
2275 uncolorable_allocnos_num
--;
2276 ira_assert (uncolorable_allocnos_num
>= 0);
2278 prev_allocno
= ALLOCNO_COLOR_DATA (allocno
)->prev_bucket_allocno
;
2279 next_allocno
= ALLOCNO_COLOR_DATA (allocno
)->next_bucket_allocno
;
2280 if (prev_allocno
!= NULL
)
2281 ALLOCNO_COLOR_DATA (prev_allocno
)->next_bucket_allocno
= next_allocno
;
2284 ira_assert (*bucket_ptr
== allocno
);
2285 *bucket_ptr
= next_allocno
;
2287 if (next_allocno
!= NULL
)
2288 ALLOCNO_COLOR_DATA (next_allocno
)->prev_bucket_allocno
= prev_allocno
;
2291 /* Put allocno A onto the coloring stack without removing it from its
2292 bucket. Pushing allocno to the coloring stack can result in moving
2293 conflicting allocnos from the uncolorable bucket to the colorable
2296 push_allocno_to_stack (ira_allocno_t a
)
2298 enum reg_class aclass
;
2299 allocno_color_data_t data
, conflict_data
;
2300 int size
, i
, n
= ALLOCNO_NUM_OBJECTS (a
);
2302 data
= ALLOCNO_COLOR_DATA (a
);
2303 data
->in_graph_p
= false;
2304 allocno_stack_vec
.safe_push (a
);
2305 aclass
= ALLOCNO_CLASS (a
);
2306 if (aclass
== NO_REGS
)
2308 size
= ira_reg_class_max_nregs
[aclass
][ALLOCNO_MODE (a
)];
2311 /* We will deal with the subwords individually. */
2312 gcc_assert (size
== ALLOCNO_NUM_OBJECTS (a
));
2315 for (i
= 0; i
< n
; i
++)
2317 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
2318 ira_object_t conflict_obj
;
2319 ira_object_conflict_iterator oci
;
2321 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
2323 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
2325 conflict_data
= ALLOCNO_COLOR_DATA (conflict_a
);
2326 if (conflict_data
->colorable_p
2327 || ! conflict_data
->in_graph_p
2328 || ALLOCNO_ASSIGNED_P (conflict_a
)
2329 || !(hard_reg_set_intersect_p
2330 (ALLOCNO_COLOR_DATA (a
)->profitable_hard_regs
,
2331 conflict_data
->profitable_hard_regs
)))
2333 ira_assert (bitmap_bit_p (coloring_allocno_bitmap
,
2334 ALLOCNO_NUM (conflict_a
)));
2335 if (update_left_conflict_sizes_p (conflict_a
, a
, size
))
2337 delete_allocno_from_bucket
2338 (conflict_a
, &uncolorable_allocno_bucket
);
2339 add_allocno_to_ordered_colorable_bucket (conflict_a
);
2340 if (internal_flag_ira_verbose
> 4 && ira_dump_file
!= NULL
)
2342 fprintf (ira_dump_file
, " Making");
2343 ira_print_expanded_allocno (conflict_a
);
2344 fprintf (ira_dump_file
, " colorable\n");
2352 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2353 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
2355 remove_allocno_from_bucket_and_push (ira_allocno_t allocno
, bool colorable_p
)
2358 delete_allocno_from_bucket (allocno
, &colorable_allocno_bucket
);
2360 delete_allocno_from_bucket (allocno
, &uncolorable_allocno_bucket
);
2361 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2363 fprintf (ira_dump_file
, " Pushing");
2364 ira_print_expanded_allocno (allocno
);
2366 fprintf (ira_dump_file
, "(cost %d)\n",
2367 ALLOCNO_COLOR_DATA (allocno
)->temp
);
2369 fprintf (ira_dump_file
, "(potential spill: %spri=%d, cost=%d)\n",
2370 ALLOCNO_BAD_SPILL_P (allocno
) ? "bad spill, " : "",
2371 allocno_spill_priority (allocno
),
2372 ALLOCNO_COLOR_DATA (allocno
)->temp
);
2375 ALLOCNO_COLOR_DATA (allocno
)->may_be_spilled_p
= true;
2376 push_allocno_to_stack (allocno
);
2379 /* Put all allocnos from colorable bucket onto the coloring stack. */
2381 push_only_colorable (void)
2383 form_threads_from_bucket (colorable_allocno_bucket
);
2384 sort_bucket (&colorable_allocno_bucket
, bucket_allocno_compare_func
);
2385 for (;colorable_allocno_bucket
!= NULL
;)
2386 remove_allocno_from_bucket_and_push (colorable_allocno_bucket
, true);
2389 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
2390 loop given by its LOOP_NODE. */
2392 ira_loop_edge_freq (ira_loop_tree_node_t loop_node
, int regno
, bool exit_p
)
2399 ira_assert (current_loops
!= NULL
&& loop_node
->loop
!= NULL
2400 && (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
));
2404 FOR_EACH_EDGE (e
, ei
, loop_node
->loop
->header
->preds
)
2405 if (e
->src
!= loop_node
->loop
->latch
2407 || (bitmap_bit_p (df_get_live_out (e
->src
), regno
)
2408 && bitmap_bit_p (df_get_live_in (e
->dest
), regno
))))
2409 freq
+= EDGE_FREQUENCY (e
);
2413 edges
= get_loop_exit_edges (loop_node
->loop
);
2414 FOR_EACH_VEC_ELT (edges
, i
, e
)
2416 || (bitmap_bit_p (df_get_live_out (e
->src
), regno
)
2417 && bitmap_bit_p (df_get_live_in (e
->dest
), regno
)))
2418 freq
+= EDGE_FREQUENCY (e
);
2422 return REG_FREQ_FROM_EDGE_FREQ (freq
);
2425 /* Calculate and return the cost of putting allocno A into memory. */
2427 calculate_allocno_spill_cost (ira_allocno_t a
)
2430 enum machine_mode mode
;
2431 enum reg_class rclass
;
2432 ira_allocno_t parent_allocno
;
2433 ira_loop_tree_node_t parent_node
, loop_node
;
2435 regno
= ALLOCNO_REGNO (a
);
2436 cost
= ALLOCNO_UPDATED_MEMORY_COST (a
) - ALLOCNO_UPDATED_CLASS_COST (a
);
2437 if (ALLOCNO_CAP (a
) != NULL
)
2439 loop_node
= ALLOCNO_LOOP_TREE_NODE (a
);
2440 if ((parent_node
= loop_node
->parent
) == NULL
)
2442 if ((parent_allocno
= parent_node
->regno_allocno_map
[regno
]) == NULL
)
2444 mode
= ALLOCNO_MODE (a
);
2445 rclass
= ALLOCNO_CLASS (a
);
2446 if (ALLOCNO_HARD_REGNO (parent_allocno
) < 0)
2447 cost
-= (ira_memory_move_cost
[mode
][rclass
][0]
2448 * ira_loop_edge_freq (loop_node
, regno
, true)
2449 + ira_memory_move_cost
[mode
][rclass
][1]
2450 * ira_loop_edge_freq (loop_node
, regno
, false));
2453 ira_init_register_move_cost_if_necessary (mode
);
2454 cost
+= ((ira_memory_move_cost
[mode
][rclass
][1]
2455 * ira_loop_edge_freq (loop_node
, regno
, true)
2456 + ira_memory_move_cost
[mode
][rclass
][0]
2457 * ira_loop_edge_freq (loop_node
, regno
, false))
2458 - (ira_register_move_cost
[mode
][rclass
][rclass
]
2459 * (ira_loop_edge_freq (loop_node
, regno
, false)
2460 + ira_loop_edge_freq (loop_node
, regno
, true))));
2465 /* Used for sorting allocnos for spilling. */
2467 allocno_spill_priority_compare (ira_allocno_t a1
, ira_allocno_t a2
)
2469 int pri1
, pri2
, diff
;
2471 if (ALLOCNO_BAD_SPILL_P (a1
) && ! ALLOCNO_BAD_SPILL_P (a2
))
2473 if (ALLOCNO_BAD_SPILL_P (a2
) && ! ALLOCNO_BAD_SPILL_P (a1
))
2475 pri1
= allocno_spill_priority (a1
);
2476 pri2
= allocno_spill_priority (a2
);
2477 if ((diff
= pri1
- pri2
) != 0)
2480 = ALLOCNO_COLOR_DATA (a1
)->temp
- ALLOCNO_COLOR_DATA (a2
)->temp
) != 0)
2482 return ALLOCNO_NUM (a1
) - ALLOCNO_NUM (a2
);
2485 /* Used for sorting allocnos for spilling. */
2487 allocno_spill_sort_compare (const void *v1p
, const void *v2p
)
2489 ira_allocno_t p1
= *(const ira_allocno_t
*) v1p
;
2490 ira_allocno_t p2
= *(const ira_allocno_t
*) v2p
;
2492 return allocno_spill_priority_compare (p1
, p2
);
2495 /* Push allocnos to the coloring stack. The order of allocnos in the
2496 stack defines the order for the subsequent coloring. */
2498 push_allocnos_to_stack (void)
2503 /* Calculate uncolorable allocno spill costs. */
2504 for (a
= uncolorable_allocno_bucket
;
2506 a
= ALLOCNO_COLOR_DATA (a
)->next_bucket_allocno
)
2507 if (ALLOCNO_CLASS (a
) != NO_REGS
)
2509 cost
= calculate_allocno_spill_cost (a
);
2510 /* ??? Remove cost of copies between the coalesced
2512 ALLOCNO_COLOR_DATA (a
)->temp
= cost
;
2514 sort_bucket (&uncolorable_allocno_bucket
, allocno_spill_sort_compare
);
2517 push_only_colorable ();
2518 a
= uncolorable_allocno_bucket
;
2521 remove_allocno_from_bucket_and_push (a
, false);
2523 ira_assert (colorable_allocno_bucket
== NULL
2524 && uncolorable_allocno_bucket
== NULL
);
2525 ira_assert (uncolorable_allocnos_num
== 0);
2528 /* Pop the coloring stack and assign hard registers to the popped
2531 pop_allocnos_from_stack (void)
2533 ira_allocno_t allocno
;
2534 enum reg_class aclass
;
2536 for (;allocno_stack_vec
.length () != 0;)
2538 allocno
= allocno_stack_vec
.pop ();
2539 aclass
= ALLOCNO_CLASS (allocno
);
2540 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2542 fprintf (ira_dump_file
, " Popping");
2543 ira_print_expanded_allocno (allocno
);
2544 fprintf (ira_dump_file
, " -- ");
2546 if (aclass
== NO_REGS
)
2548 ALLOCNO_HARD_REGNO (allocno
) = -1;
2549 ALLOCNO_ASSIGNED_P (allocno
) = true;
2550 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno
) == NULL
);
2552 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno
) == NULL
);
2553 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2554 fprintf (ira_dump_file
, "assign memory\n");
2556 else if (assign_hard_reg (allocno
, false))
2558 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2559 fprintf (ira_dump_file
, "assign reg %d\n",
2560 ALLOCNO_HARD_REGNO (allocno
));
2562 else if (ALLOCNO_ASSIGNED_P (allocno
))
2564 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2565 fprintf (ira_dump_file
, "spill%s\n",
2566 ALLOCNO_COLOR_DATA (allocno
)->may_be_spilled_p
2569 ALLOCNO_COLOR_DATA (allocno
)->in_graph_p
= true;
2573 /* Set up number of available hard registers for allocno A. */
2575 setup_allocno_available_regs_num (ira_allocno_t a
)
2577 int i
, n
, hard_regno
, hard_regs_num
, nwords
;
2578 enum reg_class aclass
;
2579 allocno_color_data_t data
;
2581 aclass
= ALLOCNO_CLASS (a
);
2582 data
= ALLOCNO_COLOR_DATA (a
);
2583 data
->available_regs_num
= 0;
2584 if (aclass
== NO_REGS
)
2586 hard_regs_num
= ira_class_hard_regs_num
[aclass
];
2587 nwords
= ALLOCNO_NUM_OBJECTS (a
);
2588 for (n
= 0, i
= hard_regs_num
- 1; i
>= 0; i
--)
2590 hard_regno
= ira_class_hard_regs
[aclass
][i
];
2591 /* Checking only profitable hard regs. */
2592 if (TEST_HARD_REG_BIT (data
->profitable_hard_regs
, hard_regno
))
2595 data
->available_regs_num
= n
;
2596 if (internal_flag_ira_verbose
<= 2 || ira_dump_file
== NULL
)
2600 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2601 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
),
2602 reg_class_names
[aclass
], ira_class_hard_regs_num
[aclass
], n
);
2603 print_hard_reg_set (ira_dump_file
, data
->profitable_hard_regs
, false);
2604 fprintf (ira_dump_file
, ", %snode: ",
2605 hard_reg_set_equal_p (data
->profitable_hard_regs
,
2606 data
->hard_regs_node
->hard_regs
->set
)
2608 print_hard_reg_set (ira_dump_file
,
2609 data
->hard_regs_node
->hard_regs
->set
, false);
2610 for (i
= 0; i
< nwords
; i
++)
2612 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
2617 fprintf (ira_dump_file
, ", ");
2618 fprintf (ira_dump_file
, " obj %d", i
);
2620 fprintf (ira_dump_file
, " (confl regs = ");
2621 print_hard_reg_set (ira_dump_file
, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
2623 fprintf (ira_dump_file
, ")");
2625 fprintf (ira_dump_file
, "\n");
2628 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2629 conflicting allocnos and hard registers. */
2631 put_allocno_into_bucket (ira_allocno_t allocno
)
2633 ALLOCNO_COLOR_DATA (allocno
)->in_graph_p
= true;
2634 setup_allocno_available_regs_num (allocno
);
2635 if (setup_left_conflict_sizes_p (allocno
))
2636 add_allocno_to_bucket (allocno
, &colorable_allocno_bucket
);
2638 add_allocno_to_bucket (allocno
, &uncolorable_allocno_bucket
);
/* Map: allocno number -> allocno priority.  */
static int *allocno_priorities;
2644 /* Set up priorities for N allocnos in array
2645 CONSIDERATION_ALLOCNOS. */
2647 setup_allocno_priorities (ira_allocno_t
*consideration_allocnos
, int n
)
2649 int i
, length
, nrefs
, priority
, max_priority
, mult
;
2653 for (i
= 0; i
< n
; i
++)
2655 a
= consideration_allocnos
[i
];
2656 nrefs
= ALLOCNO_NREFS (a
);
2657 ira_assert (nrefs
>= 0);
2658 mult
= floor_log2 (ALLOCNO_NREFS (a
)) + 1;
2659 ira_assert (mult
>= 0);
2660 allocno_priorities
[ALLOCNO_NUM (a
)]
2663 * (ALLOCNO_MEMORY_COST (a
) - ALLOCNO_CLASS_COST (a
))
2664 * ira_reg_class_max_nregs
[ALLOCNO_CLASS (a
)][ALLOCNO_MODE (a
)]);
2666 priority
= -priority
;
2667 if (max_priority
< priority
)
2668 max_priority
= priority
;
2670 mult
= max_priority
== 0 ? 1 : INT_MAX
/ max_priority
;
2671 for (i
= 0; i
< n
; i
++)
2673 a
= consideration_allocnos
[i
];
2674 length
= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
);
2675 if (ALLOCNO_NUM_OBJECTS (a
) > 1)
2676 length
/= ALLOCNO_NUM_OBJECTS (a
);
2679 allocno_priorities
[ALLOCNO_NUM (a
)]
2680 = allocno_priorities
[ALLOCNO_NUM (a
)] * mult
/ length
;
2684 /* Sort allocnos according to the profit of usage of a hard register
2685 instead of memory for them. */
2687 allocno_cost_compare_func (const void *v1p
, const void *v2p
)
2689 ira_allocno_t p1
= *(const ira_allocno_t
*) v1p
;
2690 ira_allocno_t p2
= *(const ira_allocno_t
*) v2p
;
2693 c1
= ALLOCNO_UPDATED_MEMORY_COST (p1
) - ALLOCNO_UPDATED_CLASS_COST (p1
);
2694 c2
= ALLOCNO_UPDATED_MEMORY_COST (p2
) - ALLOCNO_UPDATED_CLASS_COST (p2
);
2698 /* If regs are equally good, sort by allocno numbers, so that the
2699 results of qsort leave nothing to chance. */
2700 return ALLOCNO_NUM (p1
) - ALLOCNO_NUM (p2
);
2703 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2704 possible to hard registers. Let us try to improve allocation with
2705 cost point of view. This function improves the allocation by
2706 spilling some allocnos and assigning the freed hard registers to
2707 other allocnos if it decreases the overall allocation cost. */
2709 improve_allocation (void)
2712 int j
, k
, n
, hregno
, conflict_hregno
, base_cost
, class_size
, word
, nwords
;
2713 int check
, spill_cost
, min_cost
, nregs
, conflict_nregs
, r
, best
;
2715 enum reg_class aclass
;
2716 enum machine_mode mode
;
2718 int costs
[FIRST_PSEUDO_REGISTER
];
2719 HARD_REG_SET conflicting_regs
[2], profitable_hard_regs
;
2723 /* Clear counts used to process conflicting allocnos only once for
2725 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2726 ALLOCNO_COLOR_DATA (ira_allocnos
[i
])->temp
= 0;
2728 /* Process each allocno and try to assign a hard register to it by
2729 spilling some its conflicting allocnos. */
2730 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2732 a
= ira_allocnos
[i
];
2733 ALLOCNO_COLOR_DATA (a
)->temp
= 0;
2734 if (empty_profitable_hard_regs (a
))
2737 aclass
= ALLOCNO_CLASS (a
);
2738 allocno_costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (a
);
2739 if (allocno_costs
== NULL
)
2740 allocno_costs
= ALLOCNO_HARD_REG_COSTS (a
);
2741 if ((hregno
= ALLOCNO_HARD_REGNO (a
)) < 0)
2742 base_cost
= ALLOCNO_UPDATED_MEMORY_COST (a
);
2743 else if (allocno_costs
== NULL
)
2744 /* It means that assigning a hard register is not profitable
2745 (we don't waste memory for hard register costs in this
2749 base_cost
= allocno_costs
[ira_class_hard_reg_index
[aclass
][hregno
]];
2751 get_conflict_and_start_profitable_regs (a
, false,
2753 &profitable_hard_regs
);
2754 class_size
= ira_class_hard_regs_num
[aclass
];
2755 /* Set up cost improvement for usage of each profitable hard
2756 register for allocno A. */
2757 for (j
= 0; j
< class_size
; j
++)
2759 hregno
= ira_class_hard_regs
[aclass
][j
];
2760 if (! check_hard_reg_p (a
, hregno
,
2761 conflicting_regs
, profitable_hard_regs
))
2763 ira_assert (ira_class_hard_reg_index
[aclass
][hregno
] == j
);
2764 k
= allocno_costs
== NULL
? 0 : j
;
2765 costs
[hregno
] = (allocno_costs
== NULL
2766 ? ALLOCNO_UPDATED_CLASS_COST (a
) : allocno_costs
[k
]);
2767 costs
[hregno
] -= base_cost
;
2768 if (costs
[hregno
] < 0)
2772 /* There is no chance to improve the allocation cost by
2773 assigning hard register to allocno A even without spilling
2774 conflicting allocnos. */
2776 mode
= ALLOCNO_MODE (a
);
2777 nwords
= ALLOCNO_NUM_OBJECTS (a
);
2778 /* Process each allocno conflicting with A and update the cost
2779 improvement for profitable hard registers of A. To use a
2780 hard register for A we need to spill some conflicting
2781 allocnos and that creates penalty for the cost
2783 for (word
= 0; word
< nwords
; word
++)
2785 ira_object_t conflict_obj
;
2786 ira_object_t obj
= ALLOCNO_OBJECT (a
, word
);
2787 ira_object_conflict_iterator oci
;
2789 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
2791 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
2793 if (ALLOCNO_COLOR_DATA (conflict_a
)->temp
== check
)
2794 /* We already processed this conflicting allocno
2795 because we processed earlier another object of the
2796 conflicting allocno. */
2798 ALLOCNO_COLOR_DATA (conflict_a
)->temp
= check
;
2799 if ((conflict_hregno
= ALLOCNO_HARD_REGNO (conflict_a
)) < 0)
2801 spill_cost
= ALLOCNO_UPDATED_MEMORY_COST (conflict_a
);
2802 k
= (ira_class_hard_reg_index
2803 [ALLOCNO_CLASS (conflict_a
)][conflict_hregno
]);
2804 ira_assert (k
>= 0);
2805 if ((allocno_costs
= ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a
))
2807 spill_cost
-= allocno_costs
[k
];
2808 else if ((allocno_costs
= ALLOCNO_HARD_REG_COSTS (conflict_a
))
2810 spill_cost
-= allocno_costs
[k
];
2812 spill_cost
-= ALLOCNO_UPDATED_CLASS_COST (conflict_a
);
2814 = hard_regno_nregs
[conflict_hregno
][ALLOCNO_MODE (conflict_a
)];
2815 for (r
= conflict_hregno
;
2816 r
>= 0 && r
+ hard_regno_nregs
[r
][mode
] > conflict_hregno
;
2818 if (check_hard_reg_p (a
, r
,
2819 conflicting_regs
, profitable_hard_regs
))
2820 costs
[r
] += spill_cost
;
2821 for (r
= conflict_hregno
+ 1;
2822 r
< conflict_hregno
+ conflict_nregs
;
2824 if (check_hard_reg_p (a
, r
,
2825 conflicting_regs
, profitable_hard_regs
))
2826 costs
[r
] += spill_cost
;
2831 /* Now we choose hard register for A which results in highest
2832 allocation cost improvement. */
2833 for (j
= 0; j
< class_size
; j
++)
2835 hregno
= ira_class_hard_regs
[aclass
][j
];
2836 if (check_hard_reg_p (a
, hregno
,
2837 conflicting_regs
, profitable_hard_regs
)
2838 && min_cost
> costs
[hregno
])
2841 min_cost
= costs
[hregno
];
2845 /* We are in a situation when assigning any hard register to A
2846 by spilling some conflicting allocnos does not improve the
2849 nregs
= hard_regno_nregs
[best
][mode
];
2850 /* Now spill conflicting allocnos which contain a hard register
2851 of A when we assign the best chosen hard register to it. */
2852 for (word
= 0; word
< nwords
; word
++)
2854 ira_object_t conflict_obj
;
2855 ira_object_t obj
= ALLOCNO_OBJECT (a
, word
);
2856 ira_object_conflict_iterator oci
;
2858 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
2860 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
2862 if ((conflict_hregno
= ALLOCNO_HARD_REGNO (conflict_a
)) < 0)
2865 = hard_regno_nregs
[conflict_hregno
][ALLOCNO_MODE (conflict_a
)];
2866 if (best
+ nregs
<= conflict_hregno
2867 || conflict_hregno
+ conflict_nregs
<= best
)
2868 /* No intersection. */
2870 ALLOCNO_HARD_REGNO (conflict_a
) = -1;
2871 sorted_allocnos
[n
++] = conflict_a
;
2872 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
2873 fprintf (ira_dump_file
, "Spilling a%dr%d for a%dr%d\n",
2874 ALLOCNO_NUM (conflict_a
), ALLOCNO_REGNO (conflict_a
),
2875 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
2878 /* Assign the best chosen hard register to A. */
2879 ALLOCNO_HARD_REGNO (a
) = best
;
2880 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
2881 fprintf (ira_dump_file
, "Assigning %d to a%dr%d\n",
2882 best
, ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
2886 /* We spilled some allocnos to assign their hard registers to other
2887 allocnos. The spilled allocnos are now in array
2888 'sorted_allocnos'. There is still a possibility that some of the
2889 spilled allocnos can get hard registers. So let us try assign
2890 them hard registers again (just a reminder -- function
2891 'assign_hard_reg' assigns hard registers only if it is possible
2892 and profitable). We process the spilled allocnos with biggest
2893 benefit to get hard register first -- see function
2894 'allocno_cost_compare_func'. */
2895 qsort (sorted_allocnos
, n
, sizeof (ira_allocno_t
),
2896 allocno_cost_compare_func
);
2897 for (j
= 0; j
< n
; j
++)
2899 a
= sorted_allocnos
[j
];
2900 ALLOCNO_ASSIGNED_P (a
) = false;
2901 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2903 fprintf (ira_dump_file
, " ");
2904 ira_print_expanded_allocno (a
);
2905 fprintf (ira_dump_file
, " -- ");
2907 if (assign_hard_reg (a
, false))
2909 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2910 fprintf (ira_dump_file
, "assign hard reg %d\n",
2911 ALLOCNO_HARD_REGNO (a
));
2915 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2916 fprintf (ira_dump_file
, "assign memory\n");
2921 /* Sort allocnos according to their priorities. */
2923 allocno_priority_compare_func (const void *v1p
, const void *v2p
)
2925 ira_allocno_t a1
= *(const ira_allocno_t
*) v1p
;
2926 ira_allocno_t a2
= *(const ira_allocno_t
*) v2p
;
2929 pri1
= allocno_priorities
[ALLOCNO_NUM (a1
)];
2930 pri2
= allocno_priorities
[ALLOCNO_NUM (a2
)];
2932 return SORTGT (pri2
, pri1
);
2934 /* If regs are equally good, sort by allocnos, so that the results of
2935 qsort leave nothing to chance. */
2936 return ALLOCNO_NUM (a1
) - ALLOCNO_NUM (a2
);
2939 /* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
2940 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
2942 color_allocnos (void)
2948 setup_profitable_hard_regs ();
2949 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2952 HARD_REG_SET conflict_hard_regs
;
2953 allocno_color_data_t data
;
2954 ira_pref_t pref
, next_pref
;
2956 a
= ira_allocnos
[i
];
2957 nr
= ALLOCNO_NUM_OBJECTS (a
);
2958 CLEAR_HARD_REG_SET (conflict_hard_regs
);
2959 for (l
= 0; l
< nr
; l
++)
2961 ira_object_t obj
= ALLOCNO_OBJECT (a
, l
);
2962 IOR_HARD_REG_SET (conflict_hard_regs
,
2963 OBJECT_CONFLICT_HARD_REGS (obj
));
2965 data
= ALLOCNO_COLOR_DATA (a
);
2966 for (pref
= ALLOCNO_PREFS (a
); pref
!= NULL
; pref
= next_pref
)
2968 next_pref
= pref
->next_pref
;
2969 if (! ira_hard_reg_in_set_p (pref
->hard_regno
,
2971 data
->profitable_hard_regs
))
2972 ira_remove_pref (pref
);
2975 if (flag_ira_algorithm
== IRA_ALGORITHM_PRIORITY
)
2978 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
2980 a
= ira_allocnos
[i
];
2981 if (ALLOCNO_CLASS (a
) == NO_REGS
)
2983 ALLOCNO_HARD_REGNO (a
) = -1;
2984 ALLOCNO_ASSIGNED_P (a
) = true;
2985 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
);
2986 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) == NULL
);
2987 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
2989 fprintf (ira_dump_file
, " Spill");
2990 ira_print_expanded_allocno (a
);
2991 fprintf (ira_dump_file
, "\n");
2995 sorted_allocnos
[n
++] = a
;
2999 setup_allocno_priorities (sorted_allocnos
, n
);
3000 qsort (sorted_allocnos
, n
, sizeof (ira_allocno_t
),
3001 allocno_priority_compare_func
);
3002 for (i
= 0; i
< n
; i
++)
3004 a
= sorted_allocnos
[i
];
3005 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3007 fprintf (ira_dump_file
, " ");
3008 ira_print_expanded_allocno (a
);
3009 fprintf (ira_dump_file
, " -- ");
3011 if (assign_hard_reg (a
, false))
3013 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3014 fprintf (ira_dump_file
, "assign hard reg %d\n",
3015 ALLOCNO_HARD_REGNO (a
));
3019 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3020 fprintf (ira_dump_file
, "assign memory\n");
3027 form_allocno_hard_regs_nodes_forest ();
3028 if (internal_flag_ira_verbose
> 2 && ira_dump_file
!= NULL
)
3029 print_hard_regs_forest (ira_dump_file
);
3030 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
3032 a
= ira_allocnos
[i
];
3033 if (ALLOCNO_CLASS (a
) != NO_REGS
&& ! empty_profitable_hard_regs (a
))
3035 ALLOCNO_COLOR_DATA (a
)->in_graph_p
= true;
3036 update_costs_from_prefs (a
);
3040 ALLOCNO_HARD_REGNO (a
) = -1;
3041 ALLOCNO_ASSIGNED_P (a
) = true;
3042 /* We don't need updated costs anymore. */
3043 ira_free_allocno_updated_costs (a
);
3044 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3046 fprintf (ira_dump_file
, " Spill");
3047 ira_print_expanded_allocno (a
);
3048 fprintf (ira_dump_file
, "\n");
3052 /* Put the allocnos into the corresponding buckets. */
3053 colorable_allocno_bucket
= NULL
;
3054 uncolorable_allocno_bucket
= NULL
;
3055 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, i
, bi
)
3057 a
= ira_allocnos
[i
];
3058 if (ALLOCNO_COLOR_DATA (a
)->in_graph_p
)
3059 put_allocno_into_bucket (a
);
3061 push_allocnos_to_stack ();
3062 pop_allocnos_from_stack ();
3063 finish_allocno_hard_regs_nodes_forest ();
3065 improve_allocation ();
3070 /* Output information about the loop given by its LOOP_TREE_NODE. */
3072 print_loop_title (ira_loop_tree_node_t loop_tree_node
)
3076 ira_loop_tree_node_t subloop_node
, dest_loop_node
;
3080 if (loop_tree_node
->parent
== NULL
)
3081 fprintf (ira_dump_file
,
3082 "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
3086 ira_assert (current_loops
!= NULL
&& loop_tree_node
->loop
!= NULL
);
3087 fprintf (ira_dump_file
,
3088 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
3089 loop_tree_node
->loop_num
, loop_tree_node
->parent
->loop_num
,
3090 loop_tree_node
->loop
->header
->index
,
3091 loop_depth (loop_tree_node
->loop
));
3093 for (subloop_node
= loop_tree_node
->children
;
3094 subloop_node
!= NULL
;
3095 subloop_node
= subloop_node
->next
)
3096 if (subloop_node
->bb
!= NULL
)
3098 fprintf (ira_dump_file
, " %d", subloop_node
->bb
->index
);
3099 FOR_EACH_EDGE (e
, ei
, subloop_node
->bb
->succs
)
3100 if (e
->dest
!= EXIT_BLOCK_PTR_FOR_FN (cfun
)
3101 && ((dest_loop_node
= IRA_BB_NODE (e
->dest
)->parent
)
3103 fprintf (ira_dump_file
, "(->%d:l%d)",
3104 e
->dest
->index
, dest_loop_node
->loop_num
);
3106 fprintf (ira_dump_file
, "\n all:");
3107 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->all_allocnos
, 0, j
, bi
)
3108 fprintf (ira_dump_file
, " %dr%d", j
, ALLOCNO_REGNO (ira_allocnos
[j
]));
3109 fprintf (ira_dump_file
, "\n modified regnos:");
3110 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->modified_regnos
, 0, j
, bi
)
3111 fprintf (ira_dump_file
, " %d", j
);
3112 fprintf (ira_dump_file
, "\n border:");
3113 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->border_allocnos
, 0, j
, bi
)
3114 fprintf (ira_dump_file
, " %dr%d", j
, ALLOCNO_REGNO (ira_allocnos
[j
]));
3115 fprintf (ira_dump_file
, "\n Pressure:");
3116 for (j
= 0; (int) j
< ira_pressure_classes_num
; j
++)
3118 enum reg_class pclass
;
3120 pclass
= ira_pressure_classes
[j
];
3121 if (loop_tree_node
->reg_pressure
[pclass
] == 0)
3123 fprintf (ira_dump_file
, " %s=%d", reg_class_names
[pclass
],
3124 loop_tree_node
->reg_pressure
[pclass
]);
3126 fprintf (ira_dump_file
, "\n");
3129 /* Color the allocnos inside loop (in the extreme case it can be all
3130 of the function) given the corresponding LOOP_TREE_NODE. The
3131 function is called for each loop during top-down traverse of the
3134 color_pass (ira_loop_tree_node_t loop_tree_node
)
3136 int regno
, hard_regno
, index
= -1, n
;
3137 int cost
, exit_freq
, enter_freq
;
3140 enum machine_mode mode
;
3141 enum reg_class rclass
, aclass
, pclass
;
3142 ira_allocno_t a
, subloop_allocno
;
3143 ira_loop_tree_node_t subloop_node
;
3145 ira_assert (loop_tree_node
->bb
== NULL
);
3146 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
3147 print_loop_title (loop_tree_node
);
3149 bitmap_copy (coloring_allocno_bitmap
, loop_tree_node
->all_allocnos
);
3150 bitmap_copy (consideration_allocno_bitmap
, coloring_allocno_bitmap
);
3152 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
3154 a
= ira_allocnos
[j
];
3156 if (! ALLOCNO_ASSIGNED_P (a
))
3158 bitmap_clear_bit (coloring_allocno_bitmap
, ALLOCNO_NUM (a
));
3161 = (allocno_color_data_t
) ira_allocate (sizeof (struct allocno_color_data
)
3163 memset (allocno_color_data
, 0, sizeof (struct allocno_color_data
) * n
);
3164 curr_allocno_process
= 0;
3166 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
3168 a
= ira_allocnos
[j
];
3169 ALLOCNO_ADD_DATA (a
) = allocno_color_data
+ n
;
3172 init_allocno_threads ();
3173 /* Color all mentioned allocnos including transparent ones. */
3175 /* Process caps. They are processed just once. */
3176 if (flag_ira_region
== IRA_REGION_MIXED
3177 || flag_ira_region
== IRA_REGION_ALL
)
3178 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node
->all_allocnos
, 0, j
, bi
)
3180 a
= ira_allocnos
[j
];
3181 if (ALLOCNO_CAP_MEMBER (a
) == NULL
)
3183 /* Remove from processing in the next loop. */
3184 bitmap_clear_bit (consideration_allocno_bitmap
, j
);
3185 rclass
= ALLOCNO_CLASS (a
);
3186 pclass
= ira_pressure_class_translate
[rclass
];
3187 if (flag_ira_region
== IRA_REGION_MIXED
3188 && (loop_tree_node
->reg_pressure
[pclass
]
3189 <= ira_class_hard_regs_num
[pclass
]))
3191 mode
= ALLOCNO_MODE (a
);
3192 hard_regno
= ALLOCNO_HARD_REGNO (a
);
3193 if (hard_regno
>= 0)
3195 index
= ira_class_hard_reg_index
[rclass
][hard_regno
];
3196 ira_assert (index
>= 0);
3198 regno
= ALLOCNO_REGNO (a
);
3199 subloop_allocno
= ALLOCNO_CAP_MEMBER (a
);
3200 subloop_node
= ALLOCNO_LOOP_TREE_NODE (subloop_allocno
);
3201 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno
));
3202 ALLOCNO_HARD_REGNO (subloop_allocno
) = hard_regno
;
3203 ALLOCNO_ASSIGNED_P (subloop_allocno
) = true;
3204 if (hard_regno
>= 0)
3205 update_costs_from_copies (subloop_allocno
, true, true);
3206 /* We don't need updated costs anymore: */
3207 ira_free_allocno_updated_costs (subloop_allocno
);
3210 /* Update costs of the corresponding allocnos (not caps) in the
3212 for (subloop_node
= loop_tree_node
->subloops
;
3213 subloop_node
!= NULL
;
3214 subloop_node
= subloop_node
->subloop_next
)
3216 ira_assert (subloop_node
->bb
== NULL
);
3217 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
3219 a
= ira_allocnos
[j
];
3220 ira_assert (ALLOCNO_CAP_MEMBER (a
) == NULL
);
3221 mode
= ALLOCNO_MODE (a
);
3222 rclass
= ALLOCNO_CLASS (a
);
3223 pclass
= ira_pressure_class_translate
[rclass
];
3224 hard_regno
= ALLOCNO_HARD_REGNO (a
);
3225 /* Use hard register class here. ??? */
3226 if (hard_regno
>= 0)
3228 index
= ira_class_hard_reg_index
[rclass
][hard_regno
];
3229 ira_assert (index
>= 0);
3231 regno
= ALLOCNO_REGNO (a
);
3232 /* ??? conflict costs */
3233 subloop_allocno
= subloop_node
->regno_allocno_map
[regno
];
3234 if (subloop_allocno
== NULL
3235 || ALLOCNO_CAP (subloop_allocno
) != NULL
)
3237 ira_assert (ALLOCNO_CLASS (subloop_allocno
) == rclass
);
3238 ira_assert (bitmap_bit_p (subloop_node
->all_allocnos
,
3239 ALLOCNO_NUM (subloop_allocno
)));
3240 if ((flag_ira_region
== IRA_REGION_MIXED
)
3241 && (loop_tree_node
->reg_pressure
[pclass
]
3242 <= ira_class_hard_regs_num
[pclass
]))
3244 if (! ALLOCNO_ASSIGNED_P (subloop_allocno
))
3246 ALLOCNO_HARD_REGNO (subloop_allocno
) = hard_regno
;
3247 ALLOCNO_ASSIGNED_P (subloop_allocno
) = true;
3248 if (hard_regno
>= 0)
3249 update_costs_from_copies (subloop_allocno
, true, true);
3250 /* We don't need updated costs anymore: */
3251 ira_free_allocno_updated_costs (subloop_allocno
);
3255 exit_freq
= ira_loop_edge_freq (subloop_node
, regno
, true);
3256 enter_freq
= ira_loop_edge_freq (subloop_node
, regno
, false);
3257 ira_assert (regno
< ira_reg_equiv_len
);
3258 if (ira_equiv_no_lvalue_p (regno
))
3260 if (! ALLOCNO_ASSIGNED_P (subloop_allocno
))
3262 ALLOCNO_HARD_REGNO (subloop_allocno
) = hard_regno
;
3263 ALLOCNO_ASSIGNED_P (subloop_allocno
) = true;
3264 if (hard_regno
>= 0)
3265 update_costs_from_copies (subloop_allocno
, true, true);
3266 /* We don't need updated costs anymore: */
3267 ira_free_allocno_updated_costs (subloop_allocno
);
3270 else if (hard_regno
< 0)
3272 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno
)
3273 -= ((ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
)
3274 + (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
));
3278 aclass
= ALLOCNO_CLASS (subloop_allocno
);
3279 ira_init_register_move_cost_if_necessary (mode
);
3280 cost
= (ira_register_move_cost
[mode
][rclass
][rclass
]
3281 * (exit_freq
+ enter_freq
));
3282 ira_allocate_and_set_or_copy_costs
3283 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
), aclass
,
3284 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno
),
3285 ALLOCNO_HARD_REG_COSTS (subloop_allocno
));
3286 ira_allocate_and_set_or_copy_costs
3287 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno
),
3288 aclass
, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno
));
3289 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
)[index
] -= cost
;
3290 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno
)[index
]
3292 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno
)
3293 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
)[index
])
3294 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno
)
3295 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno
)[index
];
3296 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno
)
3297 += (ira_memory_move_cost
[mode
][rclass
][0] * enter_freq
3298 + ira_memory_move_cost
[mode
][rclass
][1] * exit_freq
);
3302 ira_free (allocno_color_data
);
3303 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap
, 0, j
, bi
)
3305 a
= ira_allocnos
[j
];
3306 ALLOCNO_ADD_DATA (a
) = NULL
;
3310 /* Initialize the common data for coloring and calls functions to do
3311 Chaitin-Briggs and regional coloring. */
3315 coloring_allocno_bitmap
= ira_allocate_bitmap ();
3316 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
3317 fprintf (ira_dump_file
, "\n**** Allocnos coloring:\n\n");
3319 ira_traverse_loop_tree (false, ira_loop_tree_root
, color_pass
, NULL
);
3321 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
3322 ira_print_disposition (ira_dump_file
);
3324 ira_free_bitmap (coloring_allocno_bitmap
);
3329 /* Move spill/restore code, which are to be generated in ira-emit.c,
3330 to less frequent points (if it is profitable) by reassigning some
3331 allocnos (in loop with subloops containing in another loop) to
3332 memory which results in longer live-range where the corresponding
3333 pseudo-registers will be in memory. */
3335 move_spill_restore (void)
3337 int cost
, regno
, hard_regno
, hard_regno2
, index
;
3339 int enter_freq
, exit_freq
;
3340 enum machine_mode mode
;
3341 enum reg_class rclass
;
3342 ira_allocno_t a
, parent_allocno
, subloop_allocno
;
3343 ira_loop_tree_node_t parent
, loop_node
, subloop_node
;
3344 ira_allocno_iterator ai
;
3349 if (internal_flag_ira_verbose
> 0 && ira_dump_file
!= NULL
)
3350 fprintf (ira_dump_file
, "New iteration of spill/restore move\n");
3351 FOR_EACH_ALLOCNO (a
, ai
)
3353 regno
= ALLOCNO_REGNO (a
);
3354 loop_node
= ALLOCNO_LOOP_TREE_NODE (a
);
3355 if (ALLOCNO_CAP_MEMBER (a
) != NULL
3356 || ALLOCNO_CAP (a
) != NULL
3357 || (hard_regno
= ALLOCNO_HARD_REGNO (a
)) < 0
3358 || loop_node
->children
== NULL
3359 /* don't do the optimization because it can create
3360 copies and the reload pass can spill the allocno set
3361 by copy although the allocno will not get memory
3363 || ira_equiv_no_lvalue_p (regno
)
3364 || !bitmap_bit_p (loop_node
->border_allocnos
, ALLOCNO_NUM (a
)))
3366 mode
= ALLOCNO_MODE (a
);
3367 rclass
= ALLOCNO_CLASS (a
);
3368 index
= ira_class_hard_reg_index
[rclass
][hard_regno
];
3369 ira_assert (index
>= 0);
3370 cost
= (ALLOCNO_MEMORY_COST (a
)
3371 - (ALLOCNO_HARD_REG_COSTS (a
) == NULL
3372 ? ALLOCNO_CLASS_COST (a
)
3373 : ALLOCNO_HARD_REG_COSTS (a
)[index
]));
3374 ira_init_register_move_cost_if_necessary (mode
);
3375 for (subloop_node
= loop_node
->subloops
;
3376 subloop_node
!= NULL
;
3377 subloop_node
= subloop_node
->subloop_next
)
3379 ira_assert (subloop_node
->bb
== NULL
);
3380 subloop_allocno
= subloop_node
->regno_allocno_map
[regno
];
3381 if (subloop_allocno
== NULL
)
3383 ira_assert (rclass
== ALLOCNO_CLASS (subloop_allocno
));
3384 /* We have accumulated cost. To get the real cost of
3385 allocno usage in the loop we should subtract costs of
3386 the subloop allocnos. */
3387 cost
-= (ALLOCNO_MEMORY_COST (subloop_allocno
)
3388 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno
) == NULL
3389 ? ALLOCNO_CLASS_COST (subloop_allocno
)
3390 : ALLOCNO_HARD_REG_COSTS (subloop_allocno
)[index
]));
3391 exit_freq
= ira_loop_edge_freq (subloop_node
, regno
, true);
3392 enter_freq
= ira_loop_edge_freq (subloop_node
, regno
, false);
3393 if ((hard_regno2
= ALLOCNO_HARD_REGNO (subloop_allocno
)) < 0)
3394 cost
-= (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
3395 + ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
);
3399 += (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
3400 + ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
);
3401 if (hard_regno2
!= hard_regno
)
3402 cost
-= (ira_register_move_cost
[mode
][rclass
][rclass
]
3403 * (exit_freq
+ enter_freq
));
3406 if ((parent
= loop_node
->parent
) != NULL
3407 && (parent_allocno
= parent
->regno_allocno_map
[regno
]) != NULL
)
3409 ira_assert (rclass
== ALLOCNO_CLASS (parent_allocno
));
3410 exit_freq
= ira_loop_edge_freq (loop_node
, regno
, true);
3411 enter_freq
= ira_loop_edge_freq (loop_node
, regno
, false);
3412 if ((hard_regno2
= ALLOCNO_HARD_REGNO (parent_allocno
)) < 0)
3413 cost
-= (ira_memory_move_cost
[mode
][rclass
][0] * exit_freq
3414 + ira_memory_move_cost
[mode
][rclass
][1] * enter_freq
);
3418 += (ira_memory_move_cost
[mode
][rclass
][1] * exit_freq
3419 + ira_memory_move_cost
[mode
][rclass
][0] * enter_freq
);
3420 if (hard_regno2
!= hard_regno
)
3421 cost
-= (ira_register_move_cost
[mode
][rclass
][rclass
]
3422 * (exit_freq
+ enter_freq
));
3427 ALLOCNO_HARD_REGNO (a
) = -1;
3428 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3432 " Moving spill/restore for a%dr%d up from loop %d",
3433 ALLOCNO_NUM (a
), regno
, loop_node
->loop_num
);
3434 fprintf (ira_dump_file
, " - profit %d\n", -cost
);
3446 /* Update current hard reg costs and current conflict hard reg costs
3447 for allocno A. It is done by processing its copies containing
3448 other allocnos already assigned. */
3450 update_curr_costs (ira_allocno_t a
)
3452 int i
, hard_regno
, cost
;
3453 enum machine_mode mode
;
3454 enum reg_class aclass
, rclass
;
3455 ira_allocno_t another_a
;
3456 ira_copy_t cp
, next_cp
;
3458 ira_free_allocno_updated_costs (a
);
3459 ira_assert (! ALLOCNO_ASSIGNED_P (a
));
3460 aclass
= ALLOCNO_CLASS (a
);
3461 if (aclass
== NO_REGS
)
3463 mode
= ALLOCNO_MODE (a
);
3464 ira_init_register_move_cost_if_necessary (mode
);
3465 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
3469 next_cp
= cp
->next_first_allocno_copy
;
3470 another_a
= cp
->second
;
3472 else if (cp
->second
== a
)
3474 next_cp
= cp
->next_second_allocno_copy
;
3475 another_a
= cp
->first
;
3479 if (! ira_reg_classes_intersect_p
[aclass
][ALLOCNO_CLASS (another_a
)]
3480 || ! ALLOCNO_ASSIGNED_P (another_a
)
3481 || (hard_regno
= ALLOCNO_HARD_REGNO (another_a
)) < 0)
3483 rclass
= REGNO_REG_CLASS (hard_regno
);
3484 i
= ira_class_hard_reg_index
[aclass
][hard_regno
];
3487 cost
= (cp
->first
== a
3488 ? ira_register_move_cost
[mode
][rclass
][aclass
]
3489 : ira_register_move_cost
[mode
][aclass
][rclass
]);
3490 ira_allocate_and_set_or_copy_costs
3491 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a
), aclass
, ALLOCNO_CLASS_COST (a
),
3492 ALLOCNO_HARD_REG_COSTS (a
));
3493 ira_allocate_and_set_or_copy_costs
3494 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
),
3495 aclass
, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a
));
3496 ALLOCNO_UPDATED_HARD_REG_COSTS (a
)[i
] -= cp
->freq
* cost
;
3497 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
)[i
] -= cp
->freq
* cost
;
3501 /* Try to assign hard registers to the unassigned allocnos and
3502 allocnos conflicting with them or conflicting with allocnos whose
3503 regno >= START_REGNO. The function is called after ira_flattening,
3504 so more allocnos (including ones created in ira-emit.c) will have a
3505 chance to get a hard register. We use simple assignment algorithm
3506 based on priorities. */
3508 ira_reassign_conflict_allocnos (int start_regno
)
3510 int i
, allocnos_to_color_num
;
3512 enum reg_class aclass
;
3513 bitmap allocnos_to_color
;
3514 ira_allocno_iterator ai
;
3516 allocnos_to_color
= ira_allocate_bitmap ();
3517 allocnos_to_color_num
= 0;
3518 FOR_EACH_ALLOCNO (a
, ai
)
3520 int n
= ALLOCNO_NUM_OBJECTS (a
);
3522 if (! ALLOCNO_ASSIGNED_P (a
)
3523 && ! bitmap_bit_p (allocnos_to_color
, ALLOCNO_NUM (a
)))
3525 if (ALLOCNO_CLASS (a
) != NO_REGS
)
3526 sorted_allocnos
[allocnos_to_color_num
++] = a
;
3529 ALLOCNO_ASSIGNED_P (a
) = true;
3530 ALLOCNO_HARD_REGNO (a
) = -1;
3531 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a
) == NULL
);
3532 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a
) == NULL
);
3534 bitmap_set_bit (allocnos_to_color
, ALLOCNO_NUM (a
));
3536 if (ALLOCNO_REGNO (a
) < start_regno
3537 || (aclass
= ALLOCNO_CLASS (a
)) == NO_REGS
)
3539 for (i
= 0; i
< n
; i
++)
3541 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3542 ira_object_t conflict_obj
;
3543 ira_object_conflict_iterator oci
;
3545 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
3547 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
3549 ira_assert (ira_reg_classes_intersect_p
3550 [aclass
][ALLOCNO_CLASS (conflict_a
)]);
3551 if (!bitmap_set_bit (allocnos_to_color
, ALLOCNO_NUM (conflict_a
)))
3553 sorted_allocnos
[allocnos_to_color_num
++] = conflict_a
;
3557 ira_free_bitmap (allocnos_to_color
);
3558 if (allocnos_to_color_num
> 1)
3560 setup_allocno_priorities (sorted_allocnos
, allocnos_to_color_num
);
3561 qsort (sorted_allocnos
, allocnos_to_color_num
, sizeof (ira_allocno_t
),
3562 allocno_priority_compare_func
);
3564 for (i
= 0; i
< allocnos_to_color_num
; i
++)
3566 a
= sorted_allocnos
[i
];
3567 ALLOCNO_ASSIGNED_P (a
) = false;
3568 update_curr_costs (a
);
3570 for (i
= 0; i
< allocnos_to_color_num
; i
++)
3572 a
= sorted_allocnos
[i
];
3573 if (assign_hard_reg (a
, true))
3575 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3578 " Secondary allocation: assign hard reg %d to reg %d\n",
3579 ALLOCNO_HARD_REGNO (a
), ALLOCNO_REGNO (a
));
/* This page contains functions used to find conflicts using allocno
   live ranges.  */

#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
   intersect.  This should be used when there is only one region.
   Currently this is used during reload.  */
static bool
conflict_by_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
	      && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Reg info calculated by dataflow infrastructure can be different
     from one calculated by regclass.  */
  if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
      || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
    return false;
  return allocnos_conflict_by_live_ranges_p (a1, a2);
}

#endif
3613 /* This page contains code to coalesce memory stack slots used by
3614 spilled allocnos. This results in smaller stack frame, better data
3615 locality, and in smaller code for some architectures like
3616 x86/x86_64 where insn size depends on address displacement value.
3617 On the other hand, it can worsen insn scheduling after the RA but
3618 in practice it is less important than smaller stack frames. */
3620 /* TRUE if we coalesced some allocnos. In other words, if we got
3621 loops formed by members first_coalesced_allocno and
3622 next_coalesced_allocno containing more one allocno. */
3623 static bool allocno_coalesced_p
;
3625 /* Bitmap used to prevent a repeated allocno processing because of
3627 static bitmap processed_coalesced_allocno_bitmap
;
3630 typedef struct coalesce_data
*coalesce_data_t
;
3632 /* To decrease footprint of ira_allocno structure we store all data
3633 needed only for coalescing in the following structure. */
3634 struct coalesce_data
3636 /* Coalesced allocnos form a cyclic list. One allocno given by
3637 FIRST represents all coalesced allocnos. The
3638 list is chained by NEXT. */
3639 ira_allocno_t first
;
3644 /* Container for storing allocno data concerning coalescing. */
3645 static coalesce_data_t allocno_coalesce_data
;
3647 /* Macro to access the data concerning coalescing. */
3648 #define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
3650 /* Merge two sets of coalesced allocnos given correspondingly by
3651 allocnos A1 and A2 (more accurately merging A2 set into A1
3654 merge_allocnos (ira_allocno_t a1
, ira_allocno_t a2
)
3656 ira_allocno_t a
, first
, last
, next
;
3658 first
= ALLOCNO_COALESCE_DATA (a1
)->first
;
3659 a
= ALLOCNO_COALESCE_DATA (a2
)->first
;
3662 for (last
= a2
, a
= ALLOCNO_COALESCE_DATA (a2
)->next
;;
3663 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3665 ALLOCNO_COALESCE_DATA (a
)->first
= first
;
3670 next
= allocno_coalesce_data
[ALLOCNO_NUM (first
)].next
;
3671 allocno_coalesce_data
[ALLOCNO_NUM (first
)].next
= a2
;
3672 allocno_coalesce_data
[ALLOCNO_NUM (last
)].next
= next
;
3675 /* Return TRUE if there are conflicting allocnos from two sets of
3676 coalesced allocnos given correspondingly by allocnos A1 and A2. We
3677 use live ranges to find conflicts because conflicts are represented
3678 only for allocnos of the same allocno class and during the reload
3679 pass we coalesce allocnos for sharing stack memory slots. */
3681 coalesced_allocno_conflict_p (ira_allocno_t a1
, ira_allocno_t a2
)
3683 ira_allocno_t a
, conflict_a
;
3685 if (allocno_coalesced_p
)
3687 bitmap_clear (processed_coalesced_allocno_bitmap
);
3688 for (a
= ALLOCNO_COALESCE_DATA (a1
)->next
;;
3689 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3691 bitmap_set_bit (processed_coalesced_allocno_bitmap
, ALLOCNO_NUM (a
));
3696 for (a
= ALLOCNO_COALESCE_DATA (a2
)->next
;;
3697 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3699 for (conflict_a
= ALLOCNO_COALESCE_DATA (a1
)->next
;;
3700 conflict_a
= ALLOCNO_COALESCE_DATA (conflict_a
)->next
)
3702 if (allocnos_conflict_by_live_ranges_p (a
, conflict_a
))
3704 if (conflict_a
== a1
)
3713 /* The major function for aggressive allocno coalescing. We coalesce
3714 only spilled allocnos. If some allocnos have been coalesced, we
3715 set up flag allocno_coalesced_p. */
3717 coalesce_allocnos (void)
3720 ira_copy_t cp
, next_cp
;
3722 int i
, n
, cp_num
, regno
;
3726 /* Collect copies. */
3727 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap
, 0, j
, bi
)
3729 a
= ira_allocnos
[j
];
3730 regno
= ALLOCNO_REGNO (a
);
3731 if (! ALLOCNO_ASSIGNED_P (a
) || ALLOCNO_HARD_REGNO (a
) >= 0
3732 || ira_equiv_no_lvalue_p (regno
))
3734 for (cp
= ALLOCNO_COPIES (a
); cp
!= NULL
; cp
= next_cp
)
3738 next_cp
= cp
->next_first_allocno_copy
;
3739 regno
= ALLOCNO_REGNO (cp
->second
);
3740 /* For priority coloring we coalesce allocnos only with
3741 the same allocno class not with intersected allocno
3742 classes as it were possible. It is done for
3744 if ((cp
->insn
!= NULL
|| cp
->constraint_p
)
3745 && ALLOCNO_ASSIGNED_P (cp
->second
)
3746 && ALLOCNO_HARD_REGNO (cp
->second
) < 0
3747 && ! ira_equiv_no_lvalue_p (regno
))
3748 sorted_copies
[cp_num
++] = cp
;
3750 else if (cp
->second
== a
)
3751 next_cp
= cp
->next_second_allocno_copy
;
3756 qsort (sorted_copies
, cp_num
, sizeof (ira_copy_t
), copy_freq_compare_func
);
3757 /* Coalesced copies, most frequently executed first. */
3758 for (; cp_num
!= 0;)
3760 for (i
= 0; i
< cp_num
; i
++)
3762 cp
= sorted_copies
[i
];
3763 if (! coalesced_allocno_conflict_p (cp
->first
, cp
->second
))
3765 allocno_coalesced_p
= true;
3766 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
3769 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
3770 cp
->num
, ALLOCNO_NUM (cp
->first
), ALLOCNO_REGNO (cp
->first
),
3771 ALLOCNO_NUM (cp
->second
), ALLOCNO_REGNO (cp
->second
),
3773 merge_allocnos (cp
->first
, cp
->second
);
3778 /* Collect the rest of copies. */
3779 for (n
= 0; i
< cp_num
; i
++)
3781 cp
= sorted_copies
[i
];
3782 if (allocno_coalesce_data
[ALLOCNO_NUM (cp
->first
)].first
3783 != allocno_coalesce_data
[ALLOCNO_NUM (cp
->second
)].first
)
3784 sorted_copies
[n
++] = cp
;
/* Usage cost and order number of coalesced allocno set to which
   given pseudo register belongs to.  */
static int *regno_coalesced_allocno_cost;
static int *regno_coalesced_allocno_num;

/* Sort pseudos according frequencies of coalesced allocno sets they
   belong to (putting most frequently ones first), and according to
   coalesced allocno set order numbers.  */
static int
coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff;

  /* Higher cost first.  */
  if ((diff = (regno_coalesced_allocno_cost[regno2]
	       - regno_coalesced_allocno_cost[regno1])) != 0)
    return diff;
  /* Then by set order number.  */
  if ((diff = (regno_coalesced_allocno_num[regno1]
	       - regno_coalesced_allocno_num[regno2])) != 0)
    return diff;
  /* Finally by regno, to make the sort stable.  */
  return regno1 - regno2;
}
3814 /* Widest width in which each pseudo reg is referred to (via subreg).
3815 It is used for sorting pseudo registers. */
3816 static unsigned int *regno_max_ref_width
;
3818 /* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1. */
3819 #ifdef STACK_GROWS_DOWNWARD
3820 # undef STACK_GROWS_DOWNWARD
3821 # define STACK_GROWS_DOWNWARD 1
3823 # define STACK_GROWS_DOWNWARD 0
3826 /* Sort pseudos according their slot numbers (putting ones with
3827 smaller numbers first, or last when the frame pointer is not
3830 coalesced_pseudo_reg_slot_compare (const void *v1p
, const void *v2p
)
3832 const int regno1
= *(const int *) v1p
;
3833 const int regno2
= *(const int *) v2p
;
3834 ira_allocno_t a1
= ira_regno_allocno_map
[regno1
];
3835 ira_allocno_t a2
= ira_regno_allocno_map
[regno2
];
3836 int diff
, slot_num1
, slot_num2
;
3837 int total_size1
, total_size2
;
3839 if (a1
== NULL
|| ALLOCNO_HARD_REGNO (a1
) >= 0)
3841 if (a2
== NULL
|| ALLOCNO_HARD_REGNO (a2
) >= 0)
3842 return regno1
- regno2
;
3845 else if (a2
== NULL
|| ALLOCNO_HARD_REGNO (a2
) >= 0)
3847 slot_num1
= -ALLOCNO_HARD_REGNO (a1
);
3848 slot_num2
= -ALLOCNO_HARD_REGNO (a2
);
3849 if ((diff
= slot_num1
- slot_num2
) != 0)
3850 return (frame_pointer_needed
3851 || !FRAME_GROWS_DOWNWARD
== STACK_GROWS_DOWNWARD
? diff
: -diff
);
3852 total_size1
= MAX (PSEUDO_REGNO_BYTES (regno1
),
3853 regno_max_ref_width
[regno1
]);
3854 total_size2
= MAX (PSEUDO_REGNO_BYTES (regno2
),
3855 regno_max_ref_width
[regno2
]);
3856 if ((diff
= total_size2
- total_size1
) != 0)
3858 return regno1
- regno2
;
3861 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
3862 for coalesced allocno sets containing allocnos with their regnos
3863 given in array PSEUDO_REGNOS of length N. */
3865 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos
, int n
)
3867 int i
, num
, regno
, cost
;
3868 ira_allocno_t allocno
, a
;
3870 for (num
= i
= 0; i
< n
; i
++)
3872 regno
= pseudo_regnos
[i
];
3873 allocno
= ira_regno_allocno_map
[regno
];
3874 if (allocno
== NULL
)
3876 regno_coalesced_allocno_cost
[regno
] = 0;
3877 regno_coalesced_allocno_num
[regno
] = ++num
;
3880 if (ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
)
3883 for (cost
= 0, a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3884 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3886 cost
+= ALLOCNO_FREQ (a
);
3890 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3891 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3893 regno_coalesced_allocno_num
[ALLOCNO_REGNO (a
)] = num
;
3894 regno_coalesced_allocno_cost
[ALLOCNO_REGNO (a
)] = cost
;
3901 /* Collect spilled allocnos representing coalesced allocno sets (the
3902 first coalesced allocno). The collected allocnos are returned
3903 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3904 number of the collected allocnos. The allocnos are given by their
3905 regnos in array PSEUDO_REGNOS of length N. */
3907 collect_spilled_coalesced_allocnos (int *pseudo_regnos
, int n
,
3908 ira_allocno_t
*spilled_coalesced_allocnos
)
3911 ira_allocno_t allocno
;
3913 for (num
= i
= 0; i
< n
; i
++)
3915 regno
= pseudo_regnos
[i
];
3916 allocno
= ira_regno_allocno_map
[regno
];
3917 if (allocno
== NULL
|| ALLOCNO_HARD_REGNO (allocno
) >= 0
3918 || ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
)
3920 spilled_coalesced_allocnos
[num
++] = allocno
;
3925 /* Array of live ranges of size IRA_ALLOCNOS_NUM. Live range for
3926 given slot contains live ranges of coalesced allocnos assigned to
3928 static live_range_t
*slot_coalesced_allocnos_live_ranges
;
3930 /* Return TRUE if coalesced allocnos represented by ALLOCNO has live
3931 ranges intersected with live ranges of coalesced allocnos assigned
3932 to slot with number N. */
3934 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno
, int n
)
3938 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3939 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3942 int nr
= ALLOCNO_NUM_OBJECTS (a
);
3944 for (i
= 0; i
< nr
; i
++)
3946 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3948 if (ira_live_ranges_intersect_p
3949 (slot_coalesced_allocnos_live_ranges
[n
],
3950 OBJECT_LIVE_RANGES (obj
)))
3959 /* Update live ranges of slot to which coalesced allocnos represented
3960 by ALLOCNO were assigned. */
3962 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno
)
3968 n
= ALLOCNO_COALESCE_DATA (allocno
)->temp
;
3969 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
3970 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
3972 int nr
= ALLOCNO_NUM_OBJECTS (a
);
3973 for (i
= 0; i
< nr
; i
++)
3975 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
3977 r
= ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj
));
3978 slot_coalesced_allocnos_live_ranges
[n
]
3979 = ira_merge_live_ranges
3980 (slot_coalesced_allocnos_live_ranges
[n
], r
);
3987 /* We have coalesced allocnos involving in copies. Coalesce allocnos
3988 further in order to share the same memory stack slot. Allocnos
3989 representing sets of allocnos coalesced before the call are given
3990 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
3991 some allocnos were coalesced in the function. */
3993 coalesce_spill_slots (ira_allocno_t
*spilled_coalesced_allocnos
, int num
)
3995 int i
, j
, n
, last_coalesced_allocno_num
;
3996 ira_allocno_t allocno
, a
;
3997 bool merged_p
= false;
3998 bitmap set_jump_crosses
= regstat_get_setjmp_crosses ();
4000 slot_coalesced_allocnos_live_ranges
4001 = (live_range_t
*) ira_allocate (sizeof (live_range_t
) * ira_allocnos_num
);
4002 memset (slot_coalesced_allocnos_live_ranges
, 0,
4003 sizeof (live_range_t
) * ira_allocnos_num
);
4004 last_coalesced_allocno_num
= 0;
4005 /* Coalesce non-conflicting spilled allocnos preferring most
4007 for (i
= 0; i
< num
; i
++)
4009 allocno
= spilled_coalesced_allocnos
[i
];
4010 if (ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
4011 || bitmap_bit_p (set_jump_crosses
, ALLOCNO_REGNO (allocno
))
4012 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno
)))
4014 for (j
= 0; j
< i
; j
++)
4016 a
= spilled_coalesced_allocnos
[j
];
4017 n
= ALLOCNO_COALESCE_DATA (a
)->temp
;
4018 if (ALLOCNO_COALESCE_DATA (a
)->first
== a
4019 && ! bitmap_bit_p (set_jump_crosses
, ALLOCNO_REGNO (a
))
4020 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a
))
4021 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno
, n
))
4026 /* No coalescing: set up number for coalesced allocnos
4027 represented by ALLOCNO. */
4028 ALLOCNO_COALESCE_DATA (allocno
)->temp
= last_coalesced_allocno_num
++;
4029 setup_slot_coalesced_allocno_live_ranges (allocno
);
4033 allocno_coalesced_p
= true;
4035 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4036 fprintf (ira_dump_file
,
4037 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4038 ALLOCNO_NUM (allocno
), ALLOCNO_REGNO (allocno
),
4039 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
));
4040 ALLOCNO_COALESCE_DATA (allocno
)->temp
4041 = ALLOCNO_COALESCE_DATA (a
)->temp
;
4042 setup_slot_coalesced_allocno_live_ranges (allocno
);
4043 merge_allocnos (a
, allocno
);
4044 ira_assert (ALLOCNO_COALESCE_DATA (a
)->first
== a
);
4047 for (i
= 0; i
< ira_allocnos_num
; i
++)
4048 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges
[i
]);
4049 ira_free (slot_coalesced_allocnos_live_ranges
);
4053 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
4054 subsequent assigning stack slots to them in the reload pass. To do
4055 this we coalesce spilled allocnos first to decrease the number of
4056 memory-memory move insns. This function is called by the
4059 ira_sort_regnos_for_alter_reg (int *pseudo_regnos
, int n
,
4060 unsigned int *reg_max_ref_width
)
4062 int max_regno
= max_reg_num ();
4063 int i
, regno
, num
, slot_num
;
4064 ira_allocno_t allocno
, a
;
4065 ira_allocno_iterator ai
;
4066 ira_allocno_t
*spilled_coalesced_allocnos
;
4068 /* Set up allocnos can be coalesced. */
4069 coloring_allocno_bitmap
= ira_allocate_bitmap ();
4070 for (i
= 0; i
< n
; i
++)
4072 regno
= pseudo_regnos
[i
];
4073 allocno
= ira_regno_allocno_map
[regno
];
4074 if (allocno
!= NULL
)
4075 bitmap_set_bit (coloring_allocno_bitmap
, ALLOCNO_NUM (allocno
));
4077 allocno_coalesced_p
= false;
4078 processed_coalesced_allocno_bitmap
= ira_allocate_bitmap ();
4079 allocno_coalesce_data
4080 = (coalesce_data_t
) ira_allocate (sizeof (struct coalesce_data
)
4081 * ira_allocnos_num
);
4082 /* Initialize coalesce data for allocnos. */
4083 FOR_EACH_ALLOCNO (a
, ai
)
4085 ALLOCNO_ADD_DATA (a
) = allocno_coalesce_data
+ ALLOCNO_NUM (a
);
4086 ALLOCNO_COALESCE_DATA (a
)->first
= a
;
4087 ALLOCNO_COALESCE_DATA (a
)->next
= a
;
4089 coalesce_allocnos ();
4090 ira_free_bitmap (coloring_allocno_bitmap
);
4091 regno_coalesced_allocno_cost
4092 = (int *) ira_allocate (max_regno
* sizeof (int));
4093 regno_coalesced_allocno_num
4094 = (int *) ira_allocate (max_regno
* sizeof (int));
4095 memset (regno_coalesced_allocno_num
, 0, max_regno
* sizeof (int));
4096 setup_coalesced_allocno_costs_and_nums (pseudo_regnos
, n
);
4097 /* Sort regnos according frequencies of the corresponding coalesced
4099 qsort (pseudo_regnos
, n
, sizeof (int), coalesced_pseudo_reg_freq_compare
);
4100 spilled_coalesced_allocnos
4101 = (ira_allocno_t
*) ira_allocate (ira_allocnos_num
4102 * sizeof (ira_allocno_t
));
4103 /* Collect allocnos representing the spilled coalesced allocno
4105 num
= collect_spilled_coalesced_allocnos (pseudo_regnos
, n
,
4106 spilled_coalesced_allocnos
);
4107 if (flag_ira_share_spill_slots
4108 && coalesce_spill_slots (spilled_coalesced_allocnos
, num
))
4110 setup_coalesced_allocno_costs_and_nums (pseudo_regnos
, n
);
4111 qsort (pseudo_regnos
, n
, sizeof (int),
4112 coalesced_pseudo_reg_freq_compare
);
4113 num
= collect_spilled_coalesced_allocnos (pseudo_regnos
, n
,
4114 spilled_coalesced_allocnos
);
4116 ira_free_bitmap (processed_coalesced_allocno_bitmap
);
4117 allocno_coalesced_p
= false;
4118 /* Assign stack slot numbers to spilled allocno sets, use smaller
4119 numbers for most frequently used coalesced allocnos. -1 is
4120 reserved for dynamic search of stack slots for pseudos spilled by
4123 for (i
= 0; i
< num
; i
++)
4125 allocno
= spilled_coalesced_allocnos
[i
];
4126 if (ALLOCNO_COALESCE_DATA (allocno
)->first
!= allocno
4127 || ALLOCNO_HARD_REGNO (allocno
) >= 0
4128 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno
)))
4130 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4131 fprintf (ira_dump_file
, " Slot %d (freq,size):", slot_num
);
4133 for (a
= ALLOCNO_COALESCE_DATA (allocno
)->next
;;
4134 a
= ALLOCNO_COALESCE_DATA (a
)->next
)
4136 ira_assert (ALLOCNO_HARD_REGNO (a
) < 0);
4137 ALLOCNO_HARD_REGNO (a
) = -slot_num
;
4138 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4139 fprintf (ira_dump_file
, " a%dr%d(%d,%d)",
4140 ALLOCNO_NUM (a
), ALLOCNO_REGNO (a
), ALLOCNO_FREQ (a
),
4141 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a
)),
4142 reg_max_ref_width
[ALLOCNO_REGNO (a
)]));
4147 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4148 fprintf (ira_dump_file
, "\n");
4150 ira_spilled_reg_stack_slots_num
= slot_num
- 1;
4151 ira_free (spilled_coalesced_allocnos
);
4152 /* Sort regnos according the slot numbers. */
4153 regno_max_ref_width
= reg_max_ref_width
;
4154 qsort (pseudo_regnos
, n
, sizeof (int), coalesced_pseudo_reg_slot_compare
);
4155 FOR_EACH_ALLOCNO (a
, ai
)
4156 ALLOCNO_ADD_DATA (a
) = NULL
;
4157 ira_free (allocno_coalesce_data
);
4158 ira_free (regno_coalesced_allocno_num
);
4159 ira_free (regno_coalesced_allocno_cost
);
4164 /* This page contains code used by the reload pass to improve the
4167 /* The function is called from reload to mark changes in the
4168 allocation of REGNO made by the reload. Remember that reg_renumber
4169 reflects the change result. */
4171 ira_mark_allocation_change (int regno
)
4173 ira_allocno_t a
= ira_regno_allocno_map
[regno
];
4174 int old_hard_regno
, hard_regno
, cost
;
4175 enum reg_class aclass
= ALLOCNO_CLASS (a
);
4177 ira_assert (a
!= NULL
);
4178 hard_regno
= reg_renumber
[regno
];
4179 if ((old_hard_regno
= ALLOCNO_HARD_REGNO (a
)) == hard_regno
)
4181 if (old_hard_regno
< 0)
4182 cost
= -ALLOCNO_MEMORY_COST (a
);
4185 ira_assert (ira_class_hard_reg_index
[aclass
][old_hard_regno
] >= 0);
4186 cost
= -(ALLOCNO_HARD_REG_COSTS (a
) == NULL
4187 ? ALLOCNO_CLASS_COST (a
)
4188 : ALLOCNO_HARD_REG_COSTS (a
)
4189 [ira_class_hard_reg_index
[aclass
][old_hard_regno
]]);
4190 update_costs_from_copies (a
, false, false);
4192 ira_overall_cost
-= cost
;
4193 ALLOCNO_HARD_REGNO (a
) = hard_regno
;
4196 ALLOCNO_HARD_REGNO (a
) = -1;
4197 cost
+= ALLOCNO_MEMORY_COST (a
);
4199 else if (ira_class_hard_reg_index
[aclass
][hard_regno
] >= 0)
4201 cost
+= (ALLOCNO_HARD_REG_COSTS (a
) == NULL
4202 ? ALLOCNO_CLASS_COST (a
)
4203 : ALLOCNO_HARD_REG_COSTS (a
)
4204 [ira_class_hard_reg_index
[aclass
][hard_regno
]]);
4205 update_costs_from_copies (a
, true, false);
4208 /* Reload changed class of the allocno. */
4210 ira_overall_cost
+= cost
;
4213 /* This function is called when reload deletes memory-memory move. In
4214 this case we marks that the allocation of the corresponding
4215 allocnos should be not changed in future. Otherwise we risk to get
4218 ira_mark_memory_move_deletion (int dst_regno
, int src_regno
)
4220 ira_allocno_t dst
= ira_regno_allocno_map
[dst_regno
];
4221 ira_allocno_t src
= ira_regno_allocno_map
[src_regno
];
4223 ira_assert (dst
!= NULL
&& src
!= NULL
4224 && ALLOCNO_HARD_REGNO (dst
) < 0
4225 && ALLOCNO_HARD_REGNO (src
) < 0);
4226 ALLOCNO_DONT_REASSIGN_P (dst
) = true;
4227 ALLOCNO_DONT_REASSIGN_P (src
) = true;
4230 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
4231 allocno A and return TRUE in the case of success. */
4233 allocno_reload_assign (ira_allocno_t a
, HARD_REG_SET forbidden_regs
)
4236 enum reg_class aclass
;
4237 int regno
= ALLOCNO_REGNO (a
);
4238 HARD_REG_SET saved
[2];
4241 n
= ALLOCNO_NUM_OBJECTS (a
);
4242 for (i
= 0; i
< n
; i
++)
4244 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
4245 COPY_HARD_REG_SET (saved
[i
], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
));
4246 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
), forbidden_regs
);
4247 if (! flag_caller_saves
&& ALLOCNO_CALLS_CROSSED_NUM (a
) != 0)
4248 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
),
4251 ALLOCNO_ASSIGNED_P (a
) = false;
4252 aclass
= ALLOCNO_CLASS (a
);
4253 update_curr_costs (a
);
4254 assign_hard_reg (a
, true);
4255 hard_regno
= ALLOCNO_HARD_REGNO (a
);
4256 reg_renumber
[regno
] = hard_regno
;
4258 ALLOCNO_HARD_REGNO (a
) = -1;
4261 ira_assert (ira_class_hard_reg_index
[aclass
][hard_regno
] >= 0);
4263 -= (ALLOCNO_MEMORY_COST (a
)
4264 - (ALLOCNO_HARD_REG_COSTS (a
) == NULL
4265 ? ALLOCNO_CLASS_COST (a
)
4266 : ALLOCNO_HARD_REG_COSTS (a
)[ira_class_hard_reg_index
4267 [aclass
][hard_regno
]]));
4268 if (ALLOCNO_CALLS_CROSSED_NUM (a
) != 0
4269 && ira_hard_reg_set_intersection_p (hard_regno
, ALLOCNO_MODE (a
),
4272 ira_assert (flag_caller_saves
);
4273 caller_save_needed
= 1;
4277 /* If we found a hard register, modify the RTL for the pseudo
4278 register to show the hard register, and mark the pseudo register
4280 if (reg_renumber
[regno
] >= 0)
4282 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4283 fprintf (ira_dump_file
, ": reassign to %d\n", reg_renumber
[regno
]);
4284 SET_REGNO (regno_reg_rtx
[regno
], reg_renumber
[regno
]);
4285 mark_home_live (regno
);
4287 else if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4288 fprintf (ira_dump_file
, "\n");
4289 for (i
= 0; i
< n
; i
++)
4291 ira_object_t obj
= ALLOCNO_OBJECT (a
, i
);
4292 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj
), saved
[i
]);
4294 return reg_renumber
[regno
] >= 0;
/* Sort pseudos according their usage frequencies (putting most
   frequently ones first).  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  int regno1 = *(const int *) v1p;
  int regno2 = *(const int *) v2p;
  int diff;

  /* Higher frequency sorts earlier; regno is the deterministic
     tie-breaker so qsort is stable in effect.  */
  if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
    return diff;
  return regno1 - regno2;
}
4311 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
4312 NUM of them) or spilled pseudos conflicting with pseudos in
4313 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
4314 allocation has been changed. The function doesn't use
4315 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
4316 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
4317 is called by the reload pass at the end of each reload
4320 ira_reassign_pseudos (int *spilled_pseudo_regs
, int num
,
4321 HARD_REG_SET bad_spill_regs
,
4322 HARD_REG_SET
*pseudo_forbidden_regs
,
4323 HARD_REG_SET
*pseudo_previous_regs
,
4329 HARD_REG_SET forbidden_regs
;
4330 bitmap temp
= BITMAP_ALLOC (NULL
);
4332 /* Add pseudos which conflict with pseudos already in
4333 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
4334 to allocating in two steps as some of the conflicts might have
4335 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
4336 for (i
= 0; i
< num
; i
++)
4337 bitmap_set_bit (temp
, spilled_pseudo_regs
[i
]);
4339 for (i
= 0, n
= num
; i
< n
; i
++)
4342 int regno
= spilled_pseudo_regs
[i
];
4343 bitmap_set_bit (temp
, regno
);
4345 a
= ira_regno_allocno_map
[regno
];
4346 nr
= ALLOCNO_NUM_OBJECTS (a
);
4347 for (j
= 0; j
< nr
; j
++)
4349 ira_object_t conflict_obj
;
4350 ira_object_t obj
= ALLOCNO_OBJECT (a
, j
);
4351 ira_object_conflict_iterator oci
;
4353 FOR_EACH_OBJECT_CONFLICT (obj
, conflict_obj
, oci
)
4355 ira_allocno_t conflict_a
= OBJECT_ALLOCNO (conflict_obj
);
4356 if (ALLOCNO_HARD_REGNO (conflict_a
) < 0
4357 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a
)
4358 && bitmap_set_bit (temp
, ALLOCNO_REGNO (conflict_a
)))
4360 spilled_pseudo_regs
[num
++] = ALLOCNO_REGNO (conflict_a
);
4361 /* ?!? This seems wrong. */
4362 bitmap_set_bit (consideration_allocno_bitmap
,
4363 ALLOCNO_NUM (conflict_a
));
4370 qsort (spilled_pseudo_regs
, num
, sizeof (int), pseudo_reg_compare
);
4372 /* Try to assign hard registers to pseudos from
4373 SPILLED_PSEUDO_REGS. */
4374 for (i
= 0; i
< num
; i
++)
4376 regno
= spilled_pseudo_regs
[i
];
4377 COPY_HARD_REG_SET (forbidden_regs
, bad_spill_regs
);
4378 IOR_HARD_REG_SET (forbidden_regs
, pseudo_forbidden_regs
[regno
]);
4379 IOR_HARD_REG_SET (forbidden_regs
, pseudo_previous_regs
[regno
]);
4380 gcc_assert (reg_renumber
[regno
] < 0);
4381 a
= ira_regno_allocno_map
[regno
];
4382 ira_mark_allocation_change (regno
);
4383 ira_assert (reg_renumber
[regno
] < 0);
4384 if (internal_flag_ira_verbose
> 3 && ira_dump_file
!= NULL
)
4385 fprintf (ira_dump_file
,
4386 " Try Assign %d(a%d), cost=%d", regno
, ALLOCNO_NUM (a
),
4387 ALLOCNO_MEMORY_COST (a
)
4388 - ALLOCNO_CLASS_COST (a
));
4389 allocno_reload_assign (a
, forbidden_regs
);
4390 if (reg_renumber
[regno
] >= 0)
4392 CLEAR_REGNO_REG_SET (spilled
, regno
);
4400 /* The function is called by reload and returns already allocated
4401 stack slot (if any) for REGNO with given INHERENT_SIZE and
4402 TOTAL_SIZE. In the case of failure to find a slot which can be
4403 used for REGNO, the function returns NULL. */
4405 ira_reuse_stack_slot (int regno
, unsigned int inherent_size
,
4406 unsigned int total_size
)
4409 int slot_num
, best_slot_num
;
4410 int cost
, best_cost
;
4411 ira_copy_t cp
, next_cp
;
4412 ira_allocno_t another_allocno
, allocno
= ira_regno_allocno_map
[regno
];
4415 struct ira_spilled_reg_stack_slot
*slot
= NULL
;
4417 ira_assert (inherent_size
== PSEUDO_REGNO_BYTES (regno
)
4418 && inherent_size
<= total_size
4419 && ALLOCNO_HARD_REGNO (allocno
) < 0);
4420 if (! flag_ira_share_spill_slots
)
4422 slot_num
= -ALLOCNO_HARD_REGNO (allocno
) - 2;
4425 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4430 best_cost
= best_slot_num
= -1;
4432 /* It means that the pseudo was spilled in the reload pass, try
4435 slot_num
< ira_spilled_reg_stack_slots_num
;
4438 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4439 if (slot
->mem
== NULL_RTX
)
4441 if (slot
->width
< total_size
4442 || GET_MODE_SIZE (GET_MODE (slot
->mem
)) < inherent_size
)
4445 EXECUTE_IF_SET_IN_BITMAP (&slot
->spilled_regs
,
4446 FIRST_PSEUDO_REGISTER
, i
, bi
)
4448 another_allocno
= ira_regno_allocno_map
[i
];
4449 if (allocnos_conflict_by_live_ranges_p (allocno
,
4453 for (cost
= 0, cp
= ALLOCNO_COPIES (allocno
);
4457 if (cp
->first
== allocno
)
4459 next_cp
= cp
->next_first_allocno_copy
;
4460 another_allocno
= cp
->second
;
4462 else if (cp
->second
== allocno
)
4464 next_cp
= cp
->next_second_allocno_copy
;
4465 another_allocno
= cp
->first
;
4469 if (cp
->insn
== NULL_RTX
)
4471 if (bitmap_bit_p (&slot
->spilled_regs
,
4472 ALLOCNO_REGNO (another_allocno
)))
4475 if (cost
> best_cost
)
4478 best_slot_num
= slot_num
;
4485 slot_num
= best_slot_num
;
4486 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4487 SET_REGNO_REG_SET (&slot
->spilled_regs
, regno
);
4489 ALLOCNO_HARD_REGNO (allocno
) = -slot_num
- 2;
4494 ira_assert (slot
->width
>= total_size
);
4495 #ifdef ENABLE_IRA_CHECKING
4496 EXECUTE_IF_SET_IN_BITMAP (&slot
->spilled_regs
,
4497 FIRST_PSEUDO_REGISTER
, i
, bi
)
4499 ira_assert (! conflict_by_live_ranges_p (regno
, i
));
4502 SET_REGNO_REG_SET (&slot
->spilled_regs
, regno
);
4503 if (internal_flag_ira_verbose
> 3 && ira_dump_file
)
4505 fprintf (ira_dump_file
, " Assigning %d(freq=%d) slot %d of",
4506 regno
, REG_FREQ (regno
), slot_num
);
4507 EXECUTE_IF_SET_IN_BITMAP (&slot
->spilled_regs
,
4508 FIRST_PSEUDO_REGISTER
, i
, bi
)
4510 if ((unsigned) regno
!= i
)
4511 fprintf (ira_dump_file
, " %d", i
);
4513 fprintf (ira_dump_file
, "\n");
4519 /* This is called by reload every time a new stack slot X with
4520 TOTAL_SIZE was allocated for REGNO. We store this info for
4521 subsequent ira_reuse_stack_slot calls. */
4523 ira_mark_new_stack_slot (rtx x
, int regno
, unsigned int total_size
)
4525 struct ira_spilled_reg_stack_slot
*slot
;
4527 ira_allocno_t allocno
;
4529 ira_assert (PSEUDO_REGNO_BYTES (regno
) <= total_size
);
4530 allocno
= ira_regno_allocno_map
[regno
];
4531 slot_num
= -ALLOCNO_HARD_REGNO (allocno
) - 2;
4534 slot_num
= ira_spilled_reg_stack_slots_num
++;
4535 ALLOCNO_HARD_REGNO (allocno
) = -slot_num
- 2;
4537 slot
= &ira_spilled_reg_stack_slots
[slot_num
];
4538 INIT_REG_SET (&slot
->spilled_regs
);
4539 SET_REGNO_REG_SET (&slot
->spilled_regs
, regno
);
4541 slot
->width
= total_size
;
4542 if (internal_flag_ira_verbose
> 3 && ira_dump_file
)
4543 fprintf (ira_dump_file
, " Assigning %d(freq=%d) a new slot %d\n",
4544 regno
, REG_FREQ (regno
), slot_num
);
4548 /* Return spill cost for pseudo-registers whose numbers are in array
4549 REGNOS (with a negative number as an end marker) for reload with
4550 given IN and OUT for INSN. Return also number points (through
4551 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4552 the register pressure is high, number of references of the
4553 pseudo-registers (through NREFS), number of callee-clobbered
4554 hard-registers occupied by the pseudo-registers (through
4555 CALL_USED_COUNT), and the first hard regno occupied by the
4556 pseudo-registers (through FIRST_HARD_REGNO). */
4558 calculate_spill_cost (int *regnos
, rtx in
, rtx out
, rtx insn
,
4559 int *excess_pressure_live_length
,
4560 int *nrefs
, int *call_used_count
, int *first_hard_regno
)
4562 int i
, cost
, regno
, hard_regno
, j
, count
, saved_cost
, nregs
;
4568 for (length
= count
= cost
= i
= 0;; i
++)
4573 *nrefs
+= REG_N_REFS (regno
);
4574 hard_regno
= reg_renumber
[regno
];
4575 ira_assert (hard_regno
>= 0);
4576 a
= ira_regno_allocno_map
[regno
];
4577 length
+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a
) / ALLOCNO_NUM_OBJECTS (a
);
4578 cost
+= ALLOCNO_MEMORY_COST (a
) - ALLOCNO_CLASS_COST (a
);
4579 nregs
= hard_regno_nregs
[hard_regno
][ALLOCNO_MODE (a
)];
4580 for (j
= 0; j
< nregs
; j
++)
4581 if (! TEST_HARD_REG_BIT (call_used_reg_set
, hard_regno
+ j
))
4585 in_p
= in
&& REG_P (in
) && (int) REGNO (in
) == hard_regno
;
4586 out_p
= out
&& REG_P (out
) && (int) REGNO (out
) == hard_regno
;
4588 && find_regno_note (insn
, REG_DEAD
, hard_regno
) != NULL_RTX
)
4592 saved_cost
+= ira_memory_move_cost
4593 [ALLOCNO_MODE (a
)][ALLOCNO_CLASS (a
)][1];
4596 += ira_memory_move_cost
4597 [ALLOCNO_MODE (a
)][ALLOCNO_CLASS (a
)][0];
4598 cost
-= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn
)) * saved_cost
;
4601 *excess_pressure_live_length
= length
;
4602 *call_used_count
= count
;
4606 hard_regno
= reg_renumber
[regnos
[0]];
4608 *first_hard_regno
= hard_regno
;
4612 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4613 REGNOS is better than spilling pseudo-registers with numbers in
4614 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4615 function used by the reload pass to make better register spilling
4618 ira_better_spill_reload_regno_p (int *regnos
, int *other_regnos
,
4619 rtx in
, rtx out
, rtx insn
)
4621 int cost
, other_cost
;
4622 int length
, other_length
;
4623 int nrefs
, other_nrefs
;
4624 int call_used_count
, other_call_used_count
;
4625 int hard_regno
, other_hard_regno
;
4627 cost
= calculate_spill_cost (regnos
, in
, out
, insn
,
4628 &length
, &nrefs
, &call_used_count
, &hard_regno
);
4629 other_cost
= calculate_spill_cost (other_regnos
, in
, out
, insn
,
4630 &other_length
, &other_nrefs
,
4631 &other_call_used_count
,
4633 if (nrefs
== 0 && other_nrefs
!= 0)
4635 if (nrefs
!= 0 && other_nrefs
== 0)
4637 if (cost
!= other_cost
)
4638 return cost
< other_cost
;
4639 if (length
!= other_length
)
4640 return length
> other_length
;
4641 #ifdef REG_ALLOC_ORDER
4642 if (hard_regno
>= 0 && other_hard_regno
>= 0)
4643 return (inv_reg_alloc_order
[hard_regno
]
4644 < inv_reg_alloc_order
[other_hard_regno
]);
4646 if (call_used_count
!= other_call_used_count
)
4647 return call_used_count
> other_call_used_count
;
4654 /* Allocate and initialize data necessary for assign_hard_reg. */
4656 ira_initiate_assign (void)
4659 = (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
4660 * ira_allocnos_num
);
4661 consideration_allocno_bitmap
= ira_allocate_bitmap ();
4662 initiate_cost_update ();
4663 allocno_priorities
= (int *) ira_allocate (sizeof (int) * ira_allocnos_num
);
4664 sorted_copies
= (ira_copy_t
*) ira_allocate (ira_copies_num
4665 * sizeof (ira_copy_t
));
4668 /* Deallocate data used by assign_hard_reg. */
4670 ira_finish_assign (void)
4672 ira_free (sorted_allocnos
);
4673 ira_free_bitmap (consideration_allocno_bitmap
);
4674 finish_cost_update ();
4675 ira_free (allocno_priorities
);
4676 ira_free (sorted_copies
);
4681 /* Entry function doing color-based register allocation. */
4685 allocno_stack_vec
.create (ira_allocnos_num
);
4686 memset (allocated_hardreg_p
, 0, sizeof (allocated_hardreg_p
));
4687 ira_initiate_assign ();
4689 ira_finish_assign ();
4690 allocno_stack_vec
.release ();
4691 move_spill_restore ();
4696 /* This page contains a simple register allocator without usage of
4697 allocno conflicts. This is used for fast allocation for -O0. */
4699 /* Do register allocation by not using allocno conflicts. It uses
4700 only allocno live ranges. The algorithm is close to Chow's
4701 priority coloring. */
4703 fast_allocation (void)
4705 int i
, j
, k
, num
, class_size
, hard_regno
;
4707 bool no_stack_reg_p
;
4709 enum reg_class aclass
;
4710 enum machine_mode mode
;
4712 ira_allocno_iterator ai
;
4714 HARD_REG_SET conflict_hard_regs
, *used_hard_regs
;
4716 sorted_allocnos
= (ira_allocno_t
*) ira_allocate (sizeof (ira_allocno_t
)
4717 * ira_allocnos_num
);
4719 FOR_EACH_ALLOCNO (a
, ai
)
4720 sorted_allocnos
[num
++] = a
;
4721 allocno_priorities
= (int *) ira_allocate (sizeof (int) * ira_allocnos_num
);
4722 setup_allocno_priorities (sorted_allocnos
, num
);
4723 used_hard_regs
= (HARD_REG_SET
*) ira_allocate (sizeof (HARD_REG_SET
)
4725 for (i
= 0; i
< ira_max_point
; i
++)
4726 CLEAR_HARD_REG_SET (used_hard_regs
[i
]);
4727 qsort (sorted_allocnos
, num
, sizeof (ira_allocno_t
),
4728 allocno_priority_compare_func
);
4729 for (i
= 0; i
< num
; i
++)
4733 a
= sorted_allocnos
[i
];
4734 nr
= ALLOCNO_NUM_OBJECTS (a
);
4735 CLEAR_HARD_REG_SET (conflict_hard_regs
);
4736 for (l
= 0; l
< nr
; l
++)
4738 ira_object_t obj
= ALLOCNO_OBJECT (a
, l
);
4739 IOR_HARD_REG_SET (conflict_hard_regs
,
4740 OBJECT_CONFLICT_HARD_REGS (obj
));
4741 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
4742 for (j
= r
->start
; j
<= r
->finish
; j
++)
4743 IOR_HARD_REG_SET (conflict_hard_regs
, used_hard_regs
[j
]);
4745 aclass
= ALLOCNO_CLASS (a
);
4746 ALLOCNO_ASSIGNED_P (a
) = true;
4747 ALLOCNO_HARD_REGNO (a
) = -1;
4748 if (hard_reg_set_subset_p (reg_class_contents
[aclass
],
4749 conflict_hard_regs
))
4751 mode
= ALLOCNO_MODE (a
);
4753 no_stack_reg_p
= ALLOCNO_NO_STACK_REG_P (a
);
4755 class_size
= ira_class_hard_regs_num
[aclass
];
4756 for (j
= 0; j
< class_size
; j
++)
4758 hard_regno
= ira_class_hard_regs
[aclass
][j
];
4760 if (no_stack_reg_p
&& FIRST_STACK_REG
<= hard_regno
4761 && hard_regno
<= LAST_STACK_REG
)
4764 if (ira_hard_reg_set_intersection_p (hard_regno
, mode
, conflict_hard_regs
)
4765 || (TEST_HARD_REG_BIT
4766 (ira_prohibited_class_mode_regs
[aclass
][mode
], hard_regno
)))
4768 ALLOCNO_HARD_REGNO (a
) = hard_regno
;
4769 for (l
= 0; l
< nr
; l
++)
4771 ira_object_t obj
= ALLOCNO_OBJECT (a
, l
);
4772 for (r
= OBJECT_LIVE_RANGES (obj
); r
!= NULL
; r
= r
->next
)
4773 for (k
= r
->start
; k
<= r
->finish
; k
++)
4774 IOR_HARD_REG_SET (used_hard_regs
[k
],
4775 ira_reg_mode_hard_regset
[hard_regno
][mode
]);
4780 ira_free (sorted_allocnos
);
4781 ira_free (used_hard_regs
);
4782 ira_free (allocno_priorities
);
4783 if (internal_flag_ira_verbose
> 1 && ira_dump_file
!= NULL
)
4784 ira_print_disposition (ira_dump_file
);
4789 /* Entry function doing coloring. */
4794 ira_allocno_iterator ai
;
4796 /* Setup updated costs. */
4797 FOR_EACH_ALLOCNO (a
, ai
)
4799 ALLOCNO_UPDATED_MEMORY_COST (a
) = ALLOCNO_MEMORY_COST (a
);
4800 ALLOCNO_UPDATED_CLASS_COST (a
) = ALLOCNO_CLASS_COST (a
);
4802 if (ira_conflicts_p
)