/* Extracted from official-gcc.git: gcc/ira-color.c
   (blob a4820fb51496b060ee716a1d5c9676737584ae0b).  */
1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "tree.h"
27 #include "rtl.h"
28 #include "df.h"
29 #include "tm_p.h"
30 #include "target.h"
31 #include "regs.h"
32 #include "flags.h"
33 #include "alias.h"
34 #include "insn-config.h"
35 #include "expmed.h"
36 #include "dojump.h"
37 #include "explow.h"
38 #include "calls.h"
39 #include "emit-rtl.h"
40 #include "varasm.h"
41 #include "stmt.h"
42 #include "expr.h"
43 #include "diagnostic-core.h"
44 #include "reload.h"
45 #include "params.h"
46 #include "cfgloop.h"
47 #include "ira.h"
48 #include "alloc-pool.h"
49 #include "ira-int.h"
51 typedef struct allocno_hard_regs *allocno_hard_regs_t;
53 /* The structure contains information about hard registers that can be
54 assigned to allocnos.  Usually it is the allocno profitable hard
55 registers but in some cases this set can be a bit different.  The major
56 reason for the difference is a requirement to use hard register sets
57 that form a tree or a forest (set of trees), i.e. the hard register set
58 of a node should contain the hard register sets of its subnodes. */
59 struct allocno_hard_regs
61 /* Hard registers that can be assigned to an allocno. */
62 HARD_REG_SET set;
63 /* Overall (spilling) cost of all allocnos with given register
64 set; summed in add_allocno_hard_regs, hence the 64-bit type. */
65 int64_t cost;
68 typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
70 /* A node representing allocno hard registers.  Such nodes form a
71 forest (set of trees).  Each subnode of a given node in the forest
72 refers to a hard register set (usually an allocno profitable hard
73 register set) which is a subset of the one referred from the given
74 node. */
75 struct allocno_hard_regs_node
77 /* Set up number of the node in preorder traversing of the forest. */
78 int preorder_num;
79 /* Used for different calculations like finding conflict size of an
80 allocno; only meaningful when equal to the current node_check_tick,
which lets us avoid clearing the whole forest between queries. */
81 int check;
82 /* Used for calculation of conflict size of an allocno.  The
83 conflict size of the allocno is maximal number of given allocno
84 hard registers needed for allocation of the conflicting allocnos.
85 Given allocno is trivially colored if this number plus the number
86 of hard registers needed for given allocno is not greater than
87 the number of given allocno hard register set. */
88 int conflict_size;
89 /* The number of hard registers given by member hard_regs. */
90 int hard_regs_num;
91 /* The following member is used to form the final forest. */
92 bool used_p;
93 /* Pointer to the corresponding profitable hard registers. */
94 allocno_hard_regs_t hard_regs;
95 /* Parent, first subnode, previous and next node with the same
96 parent in the forest. */
97 allocno_hard_regs_node_t parent, first, prev, next;
100 /* Info about changing hard reg costs of an allocno.  A list of these
records (see update_cost_records in allocno_color_data) lets the
original costs be restored when the allocno fails to get the
expected hard register. */
101 struct update_cost_record
103 /* Hard regno for which we changed the cost. */
104 int hard_regno;
105 /* Divisor used when we changed the cost of HARD_REGNO. */
106 int divisor;
107 /* Next record for given allocno. */
108 struct update_cost_record *next;
111 /* To decrease footprint of ira_allocno structure we store all data
112 needed only for coloring in the following structure. */
113 struct allocno_color_data
115 /* TRUE value means that the allocno was not removed yet from the
116 conflicting graph during coloring. */
117 unsigned int in_graph_p : 1;
118 /* TRUE if it is put on the stack to make other allocnos
119 colorable. */
120 unsigned int may_be_spilled_p : 1;
121 /* TRUE if the allocno is trivially colorable. */
122 unsigned int colorable_p : 1;
123 /* Number of hard registers of the allocno class really
124 available for the allocno allocation. It is number of the
125 profitable hard regs. */
126 int available_regs_num;
127 /* Allocnos in a bucket (used in coloring) chained by the following
128 two members. */
129 ira_allocno_t next_bucket_allocno;
130 ira_allocno_t prev_bucket_allocno;
131 /* Used for temporary purposes. */
132 int temp;
133 /* Used to exclude repeated processing. */
134 int last_process;
135 /* Profitable hard regs available for this pseudo allocation. It
136 means that the set excludes unavailable hard regs and hard regs
137 conflicting with given pseudo. They should be of the allocno
138 class. */
139 HARD_REG_SET profitable_hard_regs;
140 /* The allocno hard registers node. */
141 allocno_hard_regs_node_t hard_regs_node;
142 /* Start index (into allocno_hard_regs_subnodes) of the array of
143 structures allocno_hard_regs_subnode representing the
144 given allocno hard registers node (the 1st element in the array)
145 and all its subnodes in the tree (forest) of allocno hard
146 register nodes (see comments above). */
146 int hard_regs_subnodes_start;
147 /* The length of the previous array. */
148 int hard_regs_subnodes_num;
149 /* Records about updating allocno hard reg costs from copies. If
150 the allocno did not get expected hard register, these records are
151 used to restore original hard reg costs of allocnos connected to
152 this allocno by copies. */
153 struct update_cost_record *update_cost_records;
154 /* Threads. We collect allocnos connected by copies into threads
155 and try to assign hard regs to allocnos by threads. */
156 /* Allocno representing the whole thread. */
157 ira_allocno_t first_thread_allocno;
158 /* Allocnos in a thread form a circular list through the following
159 member. */
160 ira_allocno_t next_thread_allocno;
161 /* Whole-thread frequency. Defined only for the first thread allocno. */
162 int thread_freq;
165 /* See above. */
166 typedef struct allocno_color_data *allocno_color_data_t;
168 /* Container for storing allocno data concerning coloring; indexed by
allocno number via the ALLOCNO_COLOR_DATA macro below. */
169 static allocno_color_data_t allocno_color_data;
171 /* Macro to access the data concerning coloring. */
172 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
174 /* Used for finding allocno colorability to exclude repeated allocno
175 processing and for updating preferencing to exclude repeated
176 allocno processing during assignment. */
177 static int curr_allocno_process;
179 /* This file contains code for regional graph coloring, spill/restore
180 code placement optimization, and code helping the reload pass to do
181 a better job. */
183 /* Bitmap of allocnos which should be colored. */
184 static bitmap coloring_allocno_bitmap;
186 /* Bitmap of allocnos which should be taken into account during
187 coloring. In general case it contains allocnos from
188 coloring_allocno_bitmap plus other already colored conflicting
189 allocnos. */
190 static bitmap consideration_allocno_bitmap;
192 /* All allocnos sorted according to their priorities. */
193 static ira_allocno_t *sorted_allocnos;
195 /* Vec representing the stack of allocnos used during coloring. */
196 static vec<ira_allocno_t> allocno_stack_vec;
198 /* Helper for qsort comparison callbacks - return a positive integer if
199 X > Y, or a negative value otherwise. Use a conditional expression
200 instead of a difference computation to insulate from possible overflow
201 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
202 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
206 /* Definition of vector of allocno hard registers. */
208 /* Vector of unique allocno hard registers. */
209 static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
/* Hasher for the table of unique allocno hard register records.
   Records are hashed and compared by their HARD_REG_SET only; the
   cost member does not participate (see hash/equal below). */
211 struct allocno_hard_regs_hasher : nofree_ptr_hash <allocno_hard_regs>
213 static inline hashval_t hash (const allocno_hard_regs *);
214 static inline bool equal (const allocno_hard_regs *,
215 const allocno_hard_regs *);
218 /* Returns hash value for allocno hard registers V. */
219 inline hashval_t
220 allocno_hard_regs_hasher::hash (const allocno_hard_regs *hv)
222 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
225 /* Compares allocno hard registers V1 and V2. */
226 inline bool
227 allocno_hard_regs_hasher::equal (const allocno_hard_regs *hv1,
228 const allocno_hard_regs *hv2)
230 return hard_reg_set_equal_p (hv1->set, hv2->set);
233 /* Hash table of unique allocno hard registers, keyed by register set
(see allocno_hard_regs_hasher above). */
234 static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;
236 /* Return allocno hard registers in the hash table equal to HV. */
237 static allocno_hard_regs_t
238 find_hard_regs (allocno_hard_regs_t hv)
240 return allocno_hard_regs_htab->find (hv);
243 /* Insert allocno hard registers HV in the hash table (if it is not
244 there yet) and return the value which in the table. */
245 static allocno_hard_regs_t
246 insert_hard_regs (allocno_hard_regs_t hv)
248 allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);
250 if (*slot == NULL)
251 *slot = hv;
252 return *slot;
255 /* Initialize data concerning allocno hard registers. */
256 static void
257 init_allocno_hard_regs (void)
259 allocno_hard_regs_vec.create (200);
260 allocno_hard_regs_htab
261 = new hash_table<allocno_hard_regs_hasher> (200);
264 /* Add (or update info about) allocno hard registers with SET and
265 COST. */
266 static allocno_hard_regs_t
267 add_allocno_hard_regs (HARD_REG_SET set, int64_t cost)
269 struct allocno_hard_regs temp;
270 allocno_hard_regs_t hv;
272 gcc_assert (! hard_reg_set_empty_p (set));
273 COPY_HARD_REG_SET (temp.set, set);
274 if ((hv = find_hard_regs (&temp)) != NULL)
275 hv->cost += cost;
276 else
278 hv = ((struct allocno_hard_regs *)
279 ira_allocate (sizeof (struct allocno_hard_regs)));
280 COPY_HARD_REG_SET (hv->set, set);
281 hv->cost = cost;
282 allocno_hard_regs_vec.safe_push (hv);
283 insert_hard_regs (hv);
285 return hv;
288 /* Finalize data concerning allocno hard registers. */
289 static void
290 finish_allocno_hard_regs (void)
292 int i;
293 allocno_hard_regs_t hv;
295 for (i = 0;
296 allocno_hard_regs_vec.iterate (i, &hv);
297 i++)
298 ira_free (hv);
299 delete allocno_hard_regs_htab;
300 allocno_hard_regs_htab = NULL;
301 allocno_hard_regs_vec.release ();
304 /* Sort hard regs according to their frequency of usage. */
305 static int
306 allocno_hard_regs_compare (const void *v1p, const void *v2p)
308 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
309 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
311 if (hv2->cost > hv1->cost)
312 return 1;
313 else if (hv2->cost < hv1->cost)
314 return -1;
315 else
316 return 0;
321 /* Used for finding a common ancestor of two allocno hard registers
322 nodes in the forest.  We use the current value of
323 'node_check_tick' to mark all nodes from one node to the top and
324 then walking up from another node until we find a marked node.
326 It is also used to figure out allocno colorability as a mark that
327 we already reset value of member 'conflict_size' for the forest
328 node corresponding to the processed allocno. */
329 static int node_check_tick;
331 /* Roots of the forest containing hard register sets that can be assigned
332 to allocnos. */
333 static allocno_hard_regs_node_t hard_regs_roots;
335 /* Definition of vector of allocno hard register nodes. */
337 /* Vector used as temporary working storage while creating the forest. */
338 static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
340 /* Create and return allocno hard registers node containing allocno
341 hard registers HV. */
342 static allocno_hard_regs_node_t
343 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
345 allocno_hard_regs_node_t new_node;
347 new_node = ((struct allocno_hard_regs_node *)
348 ira_allocate (sizeof (struct allocno_hard_regs_node)));
349 new_node->check = 0;
350 new_node->hard_regs = hv;
351 new_node->hard_regs_num = hard_reg_set_size (hv->set);
352 new_node->first = NULL;
353 new_node->used_p = false;
354 return new_node;
357 /* Add allocno hard registers node NEW_NODE to the forest on its level
358 given by ROOTS. */
359 static void
360 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
361 allocno_hard_regs_node_t new_node)
363 new_node->next = *roots;
364 if (new_node->next != NULL)
365 new_node->next->prev = new_node;
366 new_node->prev = NULL;
367 *roots = new_node;
370 /* Add allocno hard registers HV (or its best approximation if it is
371 not possible) to the forest on its level given by ROOTS. */
372 static void
373 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
374 allocno_hard_regs_t hv)
376 unsigned int i, start;
377 allocno_hard_regs_node_t node, prev, new_node;
378 HARD_REG_SET temp_set;
379 allocno_hard_regs_t hv2;
/* First pass over this level: an exact match means HV is already
represented; a strict subset descends into that node's subtree;
nodes whose sets are subsets of HV are collected in
hard_regs_node_vec; a partial overlap recurses with the
intersection instead of HV itself. */
381 start = hard_regs_node_vec.length ();
382 for (node = *roots; node != NULL; node = node->next)
384 if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
385 return;
386 if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
388 add_allocno_hard_regs_to_forest (&node->first, hv);
389 return;
391 if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
392 hard_regs_node_vec.safe_push (node);
393 else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
395 COPY_HARD_REG_SET (temp_set, hv->set);
396 AND_HARD_REG_SET (temp_set, node->hard_regs->set);
397 hv2 = add_allocno_hard_regs (temp_set, hv->cost);
398 add_allocno_hard_regs_to_forest (&node->first, hv2);
/* If more than one existing node at this level is covered by HV,
re-parent all of them under a single new node whose set is their
union (HV's best approximation at this level). */
401 if (hard_regs_node_vec.length ()
402 > start + 1)
404 /* Create a new node which contains nodes in hard_regs_node_vec. */
405 CLEAR_HARD_REG_SET (temp_set);
406 for (i = start;
407 i < hard_regs_node_vec.length ();
408 i++)
410 node = hard_regs_node_vec[i];
411 IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
413 hv = add_allocno_hard_regs (temp_set, hv->cost);
414 new_node = create_new_allocno_hard_regs_node (hv);
415 prev = NULL;
/* Unlink each collected node from this level and append it to the
new node's child list, keeping the collected order. */
416 for (i = start;
417 i < hard_regs_node_vec.length ();
418 i++)
420 node = hard_regs_node_vec[i];
421 if (node->prev == NULL)
422 *roots = node->next;
423 else
424 node->prev->next = node->next;
425 if (node->next != NULL)
426 node->next->prev = node->prev;
427 if (prev == NULL)
428 new_node->first = node;
429 else
430 prev->next = node;
431 node->prev = prev;
432 node->next = NULL;
433 prev = node;
435 add_new_allocno_hard_regs_node_to_forest (roots, new_node);
437 hard_regs_node_vec.truncate (start);
440 /* Add allocno hard registers nodes starting with the forest level
441 given by FIRST which contains biggest set inside SET. */
442 static void
443 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
444 HARD_REG_SET set)
446 allocno_hard_regs_node_t node;
448 ira_assert (first != NULL);
449 for (node = first; node != NULL; node = node->next)
450 if (hard_reg_set_subset_p (node->hard_regs->set, set))
451 hard_regs_node_vec.safe_push (node);
452 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
453 collect_allocno_hard_regs_cover (node->first, set);
456 /* Set up field parent as PARENT in all allocno hard registers nodes
457 in forest given by FIRST. */
458 static void
459 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
460 allocno_hard_regs_node_t parent)
462 allocno_hard_regs_node_t node;
464 for (node = first; node != NULL; node = node->next)
466 node->parent = parent;
467 setup_allocno_hard_regs_nodes_parent (node->first, node);
471 /* Return allocno hard registers node which is a first common ancestor
472 node of FIRST and SECOND in the forest. */
473 static allocno_hard_regs_node_t
474 first_common_ancestor_node (allocno_hard_regs_node_t first,
475 allocno_hard_regs_node_t second)
477 allocno_hard_regs_node_t node;
479 node_check_tick++;
480 for (node = first; node != NULL; node = node->parent)
481 node->check = node_check_tick;
482 for (node = second; node != NULL; node = node->parent)
483 if (node->check == node_check_tick)
484 return node;
485 return first_common_ancestor_node (second, first);
488 /* Print hard reg set SET to F. */
489 static void
490 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
492 int i, start;
494 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
496 if (TEST_HARD_REG_BIT (set, i))
498 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
499 start = i;
501 if (start >= 0
502 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
504 if (start == i - 1)
505 fprintf (f, " %d", start);
506 else if (start == i - 2)
507 fprintf (f, " %d %d", start, start + 1);
508 else
509 fprintf (f, " %d-%d", start, i - 1);
510 start = -1;
513 if (new_line_p)
514 fprintf (f, "\n");
517 /* Print allocno hard register subforest given by ROOTS and its LEVEL
518 to F.  Each node is printed as "<preorder>:(<regs>)@<cost>",
indented two extra columns per level, followed by its subtree. */
519 static void
520 print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
521 int level)
523 int i;
524 allocno_hard_regs_node_t node;
526 for (node = roots; node != NULL; node = node->next)
/* NOTE(review): the literal below lost its run of spaces in
extraction — in the upstream source it is the base indentation
string; verify against the original blob. */
528 fprintf (f, " ");
529 for (i = 0; i < level * 2; i++)
530 fprintf (f, " ");
531 fprintf (f, "%d:(", node->preorder_num);
532 print_hard_reg_set (f, node->hard_regs->set, false);
533 fprintf (f, ")@%" PRId64"\n", node->hard_regs->cost);
534 print_hard_regs_subforest (f, node->first, level + 1);
538 /* Print the whole allocno hard register forest (from hard_regs_roots)
to F, starting the recursion at level 1. */
539 static void
540 print_hard_regs_forest (FILE *f)
542 fprintf (f, " Hard reg set forest:\n");
543 print_hard_regs_subforest (f, hard_regs_roots, 1);
546 /* Print the allocno hard register forest to stderr. */
547 void
548 ira_debug_hard_regs_forest (void)
550 print_hard_regs_forest (stderr);
553 /* Remove unused allocno hard registers nodes from forest given by its
554 *ROOTS. */
555 static void
556 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
558 allocno_hard_regs_node_t node, prev, next, last;
560 for (prev = NULL, node = *roots; node != NULL; node = next)
562 next = node->next;
563 if (node->used_p)
/* Used node: keep it and prune inside its subtree. */
565 remove_unused_allocno_hard_regs_nodes (&node->first);
566 prev = node;
568 else
/* Unused node: splice its children (if any) into this level in
its place — find the last child so the sibling chain can be
reconnected — otherwise simply unlink it.  The node itself is
freed either way. */
570 for (last = node->first;
571 last != NULL && last->next != NULL;
572 last = last->next)
574 if (last != NULL)
576 if (prev == NULL)
577 *roots = node->first;
578 else
579 prev->next = node->first;
580 if (next != NULL)
581 next->prev = last;
582 last->next = next;
/* Re-examine the spliced-in children on the next iteration. */
583 next = node->first;
585 else
587 if (prev == NULL)
588 *roots = next;
589 else
590 prev->next = next;
591 if (next != NULL)
592 next->prev = prev;
594 ira_free (node);
599 /* Set up fields preorder_num starting with START_NUM in all allocno
600 hard registers nodes in forest given by FIRST. Return biggest set
601 PREORDER_NUM increased by 1. */
602 static int
603 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
604 allocno_hard_regs_node_t parent,
605 int start_num)
607 allocno_hard_regs_node_t node;
609 for (node = first; node != NULL; node = node->next)
611 node->preorder_num = start_num++;
612 node->parent = parent;
613 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
614 start_num);
616 return start_num;
619 /* Number of allocno hard registers nodes in the forest (set by
enumerate_allocno_hard_regs_nodes). */
620 static int allocno_hard_regs_nodes_num;
622 /* Table mapping preorder number of allocno hard registers node in the forest
623 -> the allocno hard registers node. */
624 static allocno_hard_regs_node_t *allocno_hard_regs_nodes;
626 /* See below. */
627 typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;
629 /* The structure is used to describe all subnodes (not only immediate
630 ones) in the mentioned above tree for given allocno hard register
631 node.  The usage of such data accelerates calculation of
632 colorability of given allocno. */
633 struct allocno_hard_regs_subnode
635 /* The conflict size of conflicting allocnos whose hard register
636 sets are equal sets (plus supersets if given node is given
637 allocno hard registers node) of one in the given node. */
638 int left_conflict_size;
639 /* The summary conflict size of conflicting allocnos whose hard
640 register sets are strict subsets of one in the given node.
641 Overall conflict size is
642 left_conflict_subnodes_size
643 + MIN (max_node_impact - left_conflict_subnodes_size,
644 left_conflict_size)
646 short left_conflict_subnodes_size;
/* Number of this node's hard regs that are actually profitable for
the allocno (computed in setup_left_conflict_sizes_p). */
647 short max_node_impact;
650 /* Container for hard regs subnodes of all allocnos; each allocno owns
the slice [hard_regs_subnodes_start, +hard_regs_subnodes_num). */
651 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;
653 /* Table (preorder number of allocno hard registers node in the
654 forest, preorder number of allocno hard registers subnode) -> index
655 of the subnode relative to the node. -1 if it is not a
656 subnode. */
657 static int *allocno_hard_regs_subnode_index;
659 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
660 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
661 static void
662 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
664 allocno_hard_regs_node_t node, parent;
665 int index;
667 for (node = first; node != NULL; node = node->next)
669 allocno_hard_regs_nodes[node->preorder_num] = node;
670 for (parent = node; parent != NULL; parent = parent->parent)
672 index = parent->preorder_num * allocno_hard_regs_nodes_num;
673 allocno_hard_regs_subnode_index[index + node->preorder_num]
674 = node->preorder_num - parent->preorder_num;
676 setup_allocno_hard_regs_subnode_index (node->first);
680 /* Count all allocno hard registers nodes in tree ROOT. */
681 static int
682 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
684 int len = 1;
686 for (root = root->first; root != NULL; root = root->next)
687 len += get_allocno_hard_regs_subnodes_num (root);
688 return len;
691 /* Build the forest of allocno hard registers nodes and assign each
692 allocno a node from the forest. */
693 static void
694 form_allocno_hard_regs_nodes_forest (void)
696 unsigned int i, j, size, len;
697 int start;
698 ira_allocno_t a;
699 allocno_hard_regs_t hv;
700 bitmap_iterator bi;
701 HARD_REG_SET temp;
702 allocno_hard_regs_node_t node, allocno_hard_regs_node;
703 allocno_color_data_t allocno_data;
705 node_check_tick = 0;
706 init_allocno_hard_regs ();
707 hard_regs_roots = NULL;
708 hard_regs_node_vec.create (100);
/* Phase 1: seed the forest with one single-register node for every
allocatable hard register. */
709 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
710 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
712 CLEAR_HARD_REG_SET (temp);
713 SET_HARD_REG_BIT (temp, i);
714 hv = add_allocno_hard_regs (temp, 0);
715 node = create_new_allocno_hard_regs_node (hv);
716 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
/* Phase 2: record each allocno's profitable set (weighted by its
spill benefit) plus the full allocatable set, then add the sets to
the forest in decreasing cost order. */
718 start = allocno_hard_regs_vec.length ();
719 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
721 a = ira_allocnos[i];
722 allocno_data = ALLOCNO_COLOR_DATA (a);
724 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
725 continue;
726 hv = (add_allocno_hard_regs
727 (allocno_data->profitable_hard_regs,
728 ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
730 SET_HARD_REG_SET (temp);
731 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
732 add_allocno_hard_regs (temp, 0);
733 qsort (allocno_hard_regs_vec.address () + start,
734 allocno_hard_regs_vec.length () - start,
735 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
736 for (i = start;
737 allocno_hard_regs_vec.iterate (i, &hv);
738 i++)
740 add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
741 ira_assert (hard_regs_node_vec.length () == 0);
743 /* We need to set up parent fields for right work of
744 first_common_ancestor_node. */
745 setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
/* Phase 3: map each allocno to the single forest node covering its
profitable regs (common ancestor of all covering nodes) and mark
the nodes that are actually used. */
746 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
748 a = ira_allocnos[i];
749 allocno_data = ALLOCNO_COLOR_DATA (a);
750 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
751 continue;
752 hard_regs_node_vec.truncate (0);
753 collect_allocno_hard_regs_cover (hard_regs_roots,
754 allocno_data->profitable_hard_regs);
755 allocno_hard_regs_node = NULL;
756 for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
757 allocno_hard_regs_node
758 = (j == 0
759 ? node
760 : first_common_ancestor_node (node, allocno_hard_regs_node));
761 /* That is a temporary storage. */
762 allocno_hard_regs_node->used_p = true;
763 allocno_data->hard_regs_node = allocno_hard_regs_node;
/* Phase 4: prune unused nodes, number the survivors in preorder, and
build the node and subnode index tables. */
765 ira_assert (hard_regs_roots->next == NULL);
766 hard_regs_roots->used_p = true;
767 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
768 allocno_hard_regs_nodes_num
769 = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
770 allocno_hard_regs_nodes
771 = ((allocno_hard_regs_node_t *)
772 ira_allocate (allocno_hard_regs_nodes_num
773 * sizeof (allocno_hard_regs_node_t)));
774 size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
775 allocno_hard_regs_subnode_index
776 = (int *) ira_allocate (size * sizeof (int));
777 for (i = 0; i < size; i++)
778 allocno_hard_regs_subnode_index[i] = -1;
779 setup_allocno_hard_regs_subnode_index (hard_regs_roots);
/* Phase 5: carve per-allocno subnode slices out of one shared array. */
780 start = 0;
781 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
783 a = ira_allocnos[i];
784 allocno_data = ALLOCNO_COLOR_DATA (a);
785 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
786 continue;
787 len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
788 allocno_data->hard_regs_subnodes_start = start;
789 allocno_data->hard_regs_subnodes_num = len;
790 start += len;
792 allocno_hard_regs_subnodes
793 = ((allocno_hard_regs_subnode_t)
794 ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
795 hard_regs_node_vec.release ();
798 /* Free tree of allocno hard registers nodes given by its ROOT. */
799 static void
800 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
802 allocno_hard_regs_node_t child, next;
804 for (child = root->first; child != NULL; child = next)
806 next = child->next;
807 finish_allocno_hard_regs_nodes_tree (child);
809 ira_free (root);
812 /* Finish work with the forest of allocno hard registers nodes. */
813 static void
814 finish_allocno_hard_regs_nodes_forest (void)
816 allocno_hard_regs_node_t node, next;
818 ira_free (allocno_hard_regs_subnodes);
819 for (node = hard_regs_roots; node != NULL; node = next)
821 next = node->next;
822 finish_allocno_hard_regs_nodes_tree (node);
824 ira_free (allocno_hard_regs_nodes);
825 ira_free (allocno_hard_regs_subnode_index);
826 finish_allocno_hard_regs ();
829 /* Set up left conflict sizes and left conflict subnodes sizes of hard
830 registers subnodes of allocno A. Return TRUE if allocno A is
831 trivially colorable. */
832 static bool
833 setup_left_conflict_sizes_p (ira_allocno_t a)
835 int i, k, nobj, start;
836 int conflict_size, left_conflict_subnodes_size, node_preorder_num;
837 allocno_color_data_t data;
838 HARD_REG_SET profitable_hard_regs;
839 allocno_hard_regs_subnode_t subnodes;
840 allocno_hard_regs_node_t node;
841 HARD_REG_SET node_set;
843 nobj = ALLOCNO_NUM_OBJECTS (a);
844 data = ALLOCNO_COLOR_DATA (a);
845 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
846 COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
847 node = data->hard_regs_node;
848 node_preorder_num = node->preorder_num;
849 COPY_HARD_REG_SET (node_set, node->hard_regs->set);
/* A fresh tick so stale conflict_size values in the forest are
recognized and reset lazily below. */
850 node_check_tick++;
/* Pass 1: accumulate, per forest node, the register need of all live
conflicting allocnos whose hard regs node relates to A's node. */
851 for (k = 0; k < nobj; k++)
853 ira_object_t obj = ALLOCNO_OBJECT (a, k);
854 ira_object_t conflict_obj;
855 ira_object_conflict_iterator oci;
857 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
859 int size;
860 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
861 allocno_hard_regs_node_t conflict_node, temp_node;
862 HARD_REG_SET conflict_node_set;
863 allocno_color_data_t conflict_data;
865 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
866 if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
867 || ! hard_reg_set_intersect_p (profitable_hard_regs,
868 conflict_data
869 ->profitable_hard_regs))
870 continue;
871 conflict_node = conflict_data->hard_regs_node;
872 COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
/* Charge the conflict to the smaller of the two related nodes. */
873 if (hard_reg_set_subset_p (node_set, conflict_node_set))
874 temp_node = node;
875 else
877 ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
878 temp_node = conflict_node;
880 if (temp_node->check != node_check_tick)
882 temp_node->check = node_check_tick;
883 temp_node->conflict_size = 0;
885 size = (ira_reg_class_max_nregs
886 [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
887 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
888 /* We will deal with the subwords individually. */
889 size = 1;
890 temp_node->conflict_size += size;
/* Pass 2: initialize each subnode's left_conflict_size and its
max_node_impact (number of the node's regs that are profitable
for A). */
893 for (i = 0; i < data->hard_regs_subnodes_num; i++)
895 allocno_hard_regs_node_t temp_node;
897 temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
898 ira_assert (temp_node->preorder_num == i + node_preorder_num);
899 subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
900 ? 0 : temp_node->conflict_size);
901 if (hard_reg_set_subset_p (temp_node->hard_regs->set,
902 profitable_hard_regs))
903 subnodes[i].max_node_impact = temp_node->hard_regs_num;
904 else
906 HARD_REG_SET temp_set;
907 int j, n, hard_regno;
908 enum reg_class aclass;
910 COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
911 AND_HARD_REG_SET (temp_set, profitable_hard_regs);
912 aclass = ALLOCNO_CLASS (a);
913 for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
915 hard_regno = ira_class_hard_regs[aclass][j];
916 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
917 n++;
919 subnodes[i].max_node_impact = n;
921 subnodes[i].left_conflict_subnodes_size = 0;
/* Pass 3: propagate clipped conflict sizes bottom-up (subnodes are
laid out in preorder, so walking indices downward visits children
before parents). */
923 start = node_preorder_num * allocno_hard_regs_nodes_num;
924 for (i = data->hard_regs_subnodes_num - 1; i > 0; i--)
926 int size, parent_i;
927 allocno_hard_regs_node_t parent;
929 size = (subnodes[i].left_conflict_subnodes_size
930 + MIN (subnodes[i].max_node_impact
931 - subnodes[i].left_conflict_subnodes_size,
932 subnodes[i].left_conflict_size));
933 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
934 gcc_checking_assert(parent);
935 parent_i
936 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
937 gcc_checking_assert(parent_i >= 0);
938 subnodes[parent_i].left_conflict_subnodes_size += size;
/* A is trivially colorable if the root's clipped conflict size plus
A's own register need fits in the available registers. */
940 left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
941 conflict_size
942 = (left_conflict_subnodes_size
943 + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
944 subnodes[0].left_conflict_size));
945 conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
946 data->colorable_p = conflict_size <= data->available_regs_num;
947 return data->colorable_p;
950 /* Update left conflict sizes of hard registers subnodes of allocno A
951 after removing allocno REMOVED_A with SIZE from the conflict graph.
952 Return TRUE if A is trivially colorable. */
953 static bool
954 update_left_conflict_sizes_p (ira_allocno_t a,
955 ira_allocno_t removed_a, int size)
957 int i, conflict_size, before_conflict_size, diff, start;
958 int node_preorder_num, parent_i;
959 allocno_hard_regs_node_t node, removed_node, parent;
960 allocno_hard_regs_subnode_t subnodes;
961 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
963 ira_assert (! data->colorable_p);
964 node = data->hard_regs_node;
965 node_preorder_num = node->preorder_num;
966 removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
967 ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
968 node->hard_regs->set)
969 || hard_reg_set_subset_p (node->hard_regs->set,
970 removed_node->hard_regs->set));
/* Find the subnode where REMOVED_A was charged; if its node is not a
subnode of A's node (it is an ancestor), the charge sits at A's
own node, index 0. */
971 start = node_preorder_num * allocno_hard_regs_nodes_num;
972 i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
973 if (i < 0)
974 i = 0;
975 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
976 before_conflict_size
977 = (subnodes[i].left_conflict_subnodes_size
978 + MIN (subnodes[i].max_node_impact
979 - subnodes[i].left_conflict_subnodes_size,
980 subnodes[i].left_conflict_size));
981 subnodes[i].left_conflict_size -= size;
/* Propagate the decrease upward while the clipped conflict size of
the current subnode actually changed. */
982 for (;;)
984 conflict_size
985 = (subnodes[i].left_conflict_subnodes_size
986 + MIN (subnodes[i].max_node_impact
987 - subnodes[i].left_conflict_subnodes_size,
988 subnodes[i].left_conflict_size));
989 if ((diff = before_conflict_size - conflict_size) == 0)
990 break;
991 ira_assert (conflict_size < before_conflict_size);
992 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
993 if (parent == NULL)
994 break;
995 parent_i
996 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
997 if (parent_i < 0)
998 break;
999 i = parent_i;
1000 before_conflict_size
1001 = (subnodes[i].left_conflict_subnodes_size
1002 + MIN (subnodes[i].max_node_impact
1003 - subnodes[i].left_conflict_subnodes_size,
1004 subnodes[i].left_conflict_size));
1005 subnodes[i].left_conflict_subnodes_size -= diff;
/* A became trivially colorable only if the propagation reached the
root (i == 0) and the root's conflict size plus A's own register
need now fits. */
1007 if (i != 0
1008 || (conflict_size
1009 + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
1010 > data->available_regs_num))
1011 return false;
1012 data->colorable_p = true;
1013 return true;
1016 /* Return true if allocno A has empty profitable hard regs. */
1017 static bool
1018 empty_profitable_hard_regs (ira_allocno_t a)
1020 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1022 return hard_reg_set_empty_p (data->profitable_hard_regs);
1025 /* Set up profitable hard registers for each allocno being
1026 colored. */
1027 static void
1028 setup_profitable_hard_regs (void)
1030 unsigned int i;
1031 int j, k, nobj, hard_regno, nregs, class_size;
1032 ira_allocno_t a;
1033 bitmap_iterator bi;
1034 enum reg_class aclass;
1035 machine_mode mode;
1036 allocno_color_data_t data;
1038 /* Initial set up from allocno classes and explicitly conflicting
1039 hard regs. */
1040 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1042 a = ira_allocnos[i];
1043 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
1044 continue;
1045 data = ALLOCNO_COLOR_DATA (a);
/* An allocno with no updated costs that is cheaper in memory than in
   registers gets an empty profitable set: it should go to memory.  */
1046 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
1047 && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a)
1048 /* Do not empty profitable regs for static chain pointer
1049 pseudo when non-local goto is used. */
1050 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
1051 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1052 else
1054 mode = ALLOCNO_MODE (a);
1055 COPY_HARD_REG_SET (data->profitable_hard_regs,
1056 ira_useful_class_mode_regs[aclass][mode]);
1057 nobj = ALLOCNO_NUM_OBJECTS (a);
1058 for (k = 0; k < nobj; k++)
1060 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1062 AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
1063 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1067 /* Exclude hard regs already assigned for conflicting objects. */
1068 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
1070 a = ira_allocnos[i];
1071 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1072 || ! ALLOCNO_ASSIGNED_P (a)
1073 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1074 continue;
1075 mode = ALLOCNO_MODE (a);
1076 nregs = hard_regno_nregs[hard_regno][mode];
1077 nobj = ALLOCNO_NUM_OBJECTS (a);
1078 for (k = 0; k < nobj; k++)
1080 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1081 ira_object_t conflict_obj;
1082 ira_object_conflict_iterator oci;
1084 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1086 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1088 /* We can process the conflict allocno repeatedly with
1089 the same result. */
/* With one object per register word, only the word overlapping the
   conflict object must be removed; REG_WORDS_BIG_ENDIAN selects
   which hard register that word maps to.  */
1090 if (nregs == nobj && nregs > 1)
1092 int num = OBJECT_SUBWORD (conflict_obj);
1094 if (REG_WORDS_BIG_ENDIAN)
1095 CLEAR_HARD_REG_BIT
1096 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1097 hard_regno + nobj - num - 1);
1098 else
1099 CLEAR_HARD_REG_BIT
1100 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1101 hard_regno + num);
1103 else
1104 AND_COMPL_HARD_REG_SET
1105 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1106 ira_reg_mode_hard_regset[hard_regno][mode]);
1110 /* Exclude too costly hard regs. */
1111 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1113 int min_cost = INT_MAX;
1114 int *costs;
1116 a = ira_allocnos[i];
1117 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1118 || empty_profitable_hard_regs (a))
1119 continue;
1120 data = ALLOCNO_COLOR_DATA (a);
1121 mode = ALLOCNO_MODE (a);
/* Prefer the updated per-register costs; fall back to the original
   ones.  */
1122 if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
1123 || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
1125 class_size = ira_class_hard_regs_num[aclass];
1126 for (j = 0; j < class_size; j++)
1128 hard_regno = ira_class_hard_regs[aclass][j];
1129 if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
1130 hard_regno))
1131 continue;
1132 if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j]
1133 /* Do not remove HARD_REGNO for static chain pointer
1134 pseudo when non-local goto is used. */
1135 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
1136 CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
1137 hard_regno);
1138 else if (min_cost > costs[j])
1139 min_cost = costs[j];
1142 else if (ALLOCNO_UPDATED_MEMORY_COST (a)
1143 < ALLOCNO_UPDATED_CLASS_COST (a)
1144 /* Do not empty profitable regs for static chain
1145 pointer pseudo when non-local goto is used. */
1146 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a))))
1147 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
/* Keep the cached class cost consistent with the cheapest surviving
   hard register found above.  */
1148 if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
1149 ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
1155 /* This page contains functions used to choose hard registers for
1156 allocnos. */
1158 /* Pool for update cost records. */
/* Records are returned one at a time by free_update_cost_record_list
   and en masse by finish_update_cost_records.  */
1159 static object_allocator<update_cost_record> update_cost_record_pool
1160 ("update cost records");
1162 /* Return new update cost record with given params. */
1163 static struct update_cost_record *
1164 get_update_cost_record (int hard_regno, int divisor,
1165 struct update_cost_record *next)
1167 struct update_cost_record *record;
1169 record = update_cost_record_pool.allocate ();
1170 record->hard_regno = hard_regno;
1171 record->divisor = divisor;
1172 record->next = next;
1173 return record;
1176 /* Free memory for all records in LIST. */
1177 static void
1178 free_update_cost_record_list (struct update_cost_record *list)
1180 struct update_cost_record *next;
1182 while (list != NULL)
1184 next = list->next;
1185 update_cost_record_pool.remove (list);
1186 list = next;
1190 /* Free memory allocated for all update cost records. */
1191 static void
1192 finish_update_cost_records (void)
1194 update_cost_record_pool.release ();
1197 /* Array whose element value is TRUE if the corresponding hard
1198 register was already allocated for an allocno. */
1199 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
1201 /* Describes one element in a queue of allocnos whose costs need to be
1202 updated. Each allocno in the queue is known to have an allocno
1203 class. */
/* The queue is intrusive: links are allocno handles, and each
   allocno's element lives at update_cost_queue_elems[ALLOCNO_NUM].  */
1204 struct update_cost_queue_elem
1206 /* This element is in the queue iff CHECK == update_cost_check. */
1207 int check;
1209 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1210 connecting this allocno to the one being allocated. */
1211 int divisor;
1213 /* Allocno from which we are chaining costs of connected allocnos.
1214 It is used not go back in graph of allocnos connected by
1215 copies. */
1216 ira_allocno_t from;
1218 /* The next allocno in the queue, or null if this is the last element. */
1219 ira_allocno_t next;
1222 /* The first element in a queue of allocnos whose copy costs need to be
1223 updated. Null if the queue is empty. */
1224 static ira_allocno_t update_cost_queue;
1226 /* The last element in the queue described by update_cost_queue.
1227 Not valid if update_cost_queue is null. */
1228 static struct update_cost_queue_elem *update_cost_queue_tail;
1230 /* A pool of elements in the queue described by update_cost_queue.
1231 Elements are indexed by ALLOCNO_NUM. */
1232 static struct update_cost_queue_elem *update_cost_queue_elems;
1234 /* The current value of update_costs_from_copies call count. */
/* Bumping this counter (see start_update_cost) lazily invalidates all
   queue elements without touching the array.  */
1235 static int update_cost_check;
1237 /* Allocate and initialize data necessary for function
1238 update_costs_from_copies. */
1239 static void
1240 initiate_cost_update (void)
1242 size_t size;
1244 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1245 update_cost_queue_elems
1246 = (struct update_cost_queue_elem *) ira_allocate (size);
1247 memset (update_cost_queue_elems, 0, size);
1248 update_cost_check = 0;
1251 /* Deallocate data used by function update_costs_from_copies. */
1252 static void
1253 finish_cost_update (void)
1255 ira_free (update_cost_queue_elems);
1256 finish_update_cost_records ();
1259 /* When we traverse allocnos to update hard register costs, the cost
1260 divisor will be multiplied by the following macro value for each
1261 hop from given allocno to directly connected allocnos. */
/* update_conflict_hard_regno_costs stops queuing further hops once
   the divisor exceeds COST_HOP_DIVISOR**4 (about 5 hops).  */
1262 #define COST_HOP_DIVISOR 4
1264 /* Start a new cost-updating pass. */
1265 static void
1266 start_update_cost (void)
1268 update_cost_check++;
1269 update_cost_queue = NULL;
1272 /* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1273 ALLOCNO is already in the queue, or has NO_REGS class. */
1274 static inline void
1275 queue_update_cost (ira_allocno_t allocno, ira_allocno_t from, int divisor)
1277 struct update_cost_queue_elem *elem;
1279 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1280 if (elem->check != update_cost_check
1281 && ALLOCNO_CLASS (allocno) != NO_REGS)
1283 elem->check = update_cost_check;
1284 elem->from = from;
1285 elem->divisor = divisor;
1286 elem->next = NULL;
1287 if (update_cost_queue == NULL)
1288 update_cost_queue = allocno;
1289 else
1290 update_cost_queue_tail->next = allocno;
1291 update_cost_queue_tail = elem;
1295 /* Try to remove the first element from update_cost_queue. Return
1296 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1297 *DIVISOR) describe the removed element. */
1298 static inline bool
1299 get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *from, int *divisor)
1301 struct update_cost_queue_elem *elem;
1303 if (update_cost_queue == NULL)
1304 return false;
1306 *allocno = update_cost_queue;
1307 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1308 *from = elem->from;
1309 *divisor = elem->divisor;
1310 update_cost_queue = elem->next;
1311 return true;
1314 /* Increase costs of HARD_REGNO by UPDATE_COST and conflict cost by
1315 UPDATE_CONFLICT_COST for ALLOCNO. Return true if we really
1316 modified the cost. */
1317 static bool
1318 update_allocno_cost (ira_allocno_t allocno, int hard_regno,
1319 int update_cost, int update_conflict_cost)
1321 int i;
1322 enum reg_class aclass = ALLOCNO_CLASS (allocno);
1324 i = ira_class_hard_reg_index[aclass][hard_regno];
1325 if (i < 0)
1326 return false;
1327 ira_allocate_and_set_or_copy_costs
1328 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno), aclass,
1329 ALLOCNO_UPDATED_CLASS_COST (allocno),
1330 ALLOCNO_HARD_REG_COSTS (allocno));
1331 ira_allocate_and_set_or_copy_costs
1332 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno),
1333 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno));
1334 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)[i] += update_cost;
1335 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)[i] += update_conflict_cost;
1336 return true;
1339 /* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
1340 by copies to ALLOCNO to increase chances to remove some copies as
1341 the result of subsequent assignment. Record cost updates if
1342 RECORD_P is true. */
1343 static void
1344 update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
1345 int divisor, bool decr_p, bool record_p)
1347 int cost, update_cost, update_conflict_cost;
1348 machine_mode mode;
1349 enum reg_class rclass, aclass;
1350 ira_allocno_t another_allocno, from = NULL;
1351 ira_copy_t cp, next_cp;
1353 rclass = REGNO_REG_CLASS (hard_regno);
/* The body below executes for ALLOCNO and then for every allocno
   reached through copies, drained from the update cost queue — note
   the trailing `while (get_next_update_cost ...)' of the enclosing
   do-while.  */
1356 mode = ALLOCNO_MODE (allocno);
1357 ira_init_register_move_cost_if_necessary (mode);
1358 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1360 if (cp->first == allocno)
1362 next_cp = cp->next_first_allocno_copy;
1363 another_allocno = cp->second;
1365 else if (cp->second == allocno)
1367 next_cp = cp->next_second_allocno_copy;
1368 another_allocno = cp->first;
1370 else
1371 gcc_unreachable ();
/* Do not walk straight back to the allocno we came from.  */
1373 if (another_allocno == from)
1374 continue;
1376 aclass = ALLOCNO_CLASS (another_allocno);
1377 if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
1378 hard_regno)
1379 || ALLOCNO_ASSIGNED_P (another_allocno))
1380 continue;
/* The move cost direction depends on which side of the copy ALLOCNO
   sits on.  */
1382 cost = (cp->second == allocno
1383 ? ira_register_move_cost[mode][rclass][aclass]
1384 : ira_register_move_cost[mode][aclass][rclass]);
1385 if (decr_p)
1386 cost = -cost;
1388 update_conflict_cost = update_cost = cp->freq * cost / divisor;
1390 if (ALLOCNO_COLOR_DATA (another_allocno) != NULL
1391 && (ALLOCNO_COLOR_DATA (allocno)->first_thread_allocno
1392 != ALLOCNO_COLOR_DATA (another_allocno)->first_thread_allocno))
1393 /* Decrease conflict cost of ANOTHER_ALLOCNO if it is not
1394 in the same allocation thread. */
1395 update_conflict_cost /= COST_HOP_DIVISOR;
1397 if (update_cost == 0)
1398 continue;
1400 if (! update_allocno_cost (another_allocno, hard_regno,
1401 update_cost, update_conflict_cost))
1402 continue;
/* Chain the propagation; each further hop is damped by another
   factor of COST_HOP_DIVISOR.  */
1403 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
1404 if (record_p && ALLOCNO_COLOR_DATA (another_allocno) != NULL)
1405 ALLOCNO_COLOR_DATA (another_allocno)->update_cost_records
1406 = get_update_cost_record (hard_regno, divisor,
1407 ALLOCNO_COLOR_DATA (another_allocno)
1408 ->update_cost_records);
1411 while (get_next_update_cost (&allocno, &from, &divisor));
1414 /* Decrease preferred ALLOCNO hard register costs and costs of
1415 allocnos connected to ALLOCNO through copy. */
1416 static void
1417 update_costs_from_prefs (ira_allocno_t allocno)
1419 ira_pref_t pref;
1421 start_update_cost ();
1422 for (pref = ALLOCNO_PREFS (allocno); pref != NULL; pref = pref->next_pref)
1423 update_costs_from_allocno (allocno, pref->hard_regno,
1424 COST_HOP_DIVISOR, true, true);
1427 /* Update (decrease if DECR_P) the cost of allocnos connected to
1428 ALLOCNO through copies to increase chances to remove some copies as
1429 the result of subsequent assignment. ALLOCNO was just assigned to
1430 a hard register. Record cost updates if RECORD_P is true. */
1431 static void
1432 update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
1434 int hard_regno;
1436 hard_regno = ALLOCNO_HARD_REGNO (allocno);
1437 ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1438 start_update_cost ();
1439 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
1442 /* Restore costs of allocnos connected to ALLOCNO by copies as it was
1443 before updating costs of these allocnos from given allocno. This
1444 is a wise thing to do as if given allocno did not get an expected
1445 hard reg, using smaller cost of the hard reg for allocnos connected
1446 by copies to given allocno becomes actually misleading. Free all
1447 update cost records for ALLOCNO as we don't need them anymore. */
1448 static void
1449 restore_costs_from_copies (ira_allocno_t allocno)
1451 struct update_cost_record *records, *curr;
1453 if (ALLOCNO_COLOR_DATA (allocno) == NULL)
1454 return;
1455 records = ALLOCNO_COLOR_DATA (allocno)->update_cost_records;
1456 start_update_cost ();
1457 for (curr = records; curr != NULL; curr = curr->next)
1458 update_costs_from_allocno (allocno, curr->hard_regno,
1459 curr->divisor, true, false);
1460 free_update_cost_record_list (records);
1461 ALLOCNO_COLOR_DATA (allocno)->update_cost_records = NULL;
1464 /* This function updates COSTS (decrease if DECR_P) for hard_registers
1465 of ACLASS by conflict costs of the unassigned allocnos
1466 connected by copies with allocnos in update_cost_queue. This
1467 update increases chances to remove some copies. */
1468 static void
1469 update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
1470 bool decr_p)
1472 int i, cost, class_size, freq, mult, div, divisor;
1473 int index, hard_regno;
1474 int *conflict_costs;
1475 bool cont_p;
1476 enum reg_class another_aclass;
1477 ira_allocno_t allocno, another_allocno, from;
1478 ira_copy_t cp, next_cp;
/* Drain the queue seeded by the caller, walking copies outward from
   each dequeued allocno.  */
1480 while (get_next_update_cost (&allocno, &from, &divisor))
1481 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1483 if (cp->first == allocno)
1485 next_cp = cp->next_first_allocno_copy;
1486 another_allocno = cp->second;
1488 else if (cp->second == allocno)
1490 next_cp = cp->next_second_allocno_copy;
1491 another_allocno = cp->first;
1493 else
1494 gcc_unreachable ();
/* Do not walk straight back to the allocno we came from.  */
1496 if (another_allocno == from)
1497 continue;
1499 another_aclass = ALLOCNO_CLASS (another_allocno);
1500 if (! ira_reg_classes_intersect_p[aclass][another_aclass]
1501 || ALLOCNO_ASSIGNED_P (another_allocno)
1502 || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
1503 continue;
1504 class_size = ira_class_hard_regs_num[another_aclass];
1505 ira_allocate_and_copy_costs
1506 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
1507 another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
1508 conflict_costs
1509 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
/* A null vector means no individual conflict costs; keep walking
   through this allocno anyway.  */
1510 if (conflict_costs == NULL)
1511 cont_p = true;
1512 else
1514 mult = cp->freq;
1515 freq = ALLOCNO_FREQ (another_allocno);
1516 if (freq == 0)
1517 freq = 1;
1518 div = freq * divisor;
1519 cont_p = false;
1520 for (i = class_size - 1; i >= 0; i--)
1522 hard_regno = ira_class_hard_regs[another_aclass][i];
1523 ira_assert (hard_regno >= 0);
1524 index = ira_class_hard_reg_index[aclass][hard_regno];
/* Skip hard regs of ANOTHER_ACLASS that do not exist in ACLASS.  */
1525 if (index < 0)
1526 continue;
1527 cost = (int) ((unsigned) conflict_costs [i] * mult) / div;
1528 if (cost == 0)
1529 continue;
1530 cont_p = true;
1531 if (decr_p)
1532 cost = -cost;
1533 costs[index] += cost;
1536 /* Probably 5 hops will be enough. */
1537 if (cont_p
1538 && divisor <= (COST_HOP_DIVISOR
1539 * COST_HOP_DIVISOR
1540 * COST_HOP_DIVISOR
1541 * COST_HOP_DIVISOR))
1542 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
1546 /* Set up conflicting (through CONFLICT_REGS) for each object of
1547 allocno A and the start allocno profitable regs (through
1548 START_PROFITABLE_REGS). Remember that the start profitable regs
1549 exclude hard regs which can not hold value of mode of allocno A.
1550 This covers mostly cases when multi-register value should be
1551 aligned. */
1552 static inline void
1553 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1554 HARD_REG_SET *conflict_regs,
1555 HARD_REG_SET *start_profitable_regs)
1557 int i, nwords;
1558 ira_object_t obj;
1560 nwords = ALLOCNO_NUM_OBJECTS (a);
1561 for (i = 0; i < nwords; i++)
1563 obj = ALLOCNO_OBJECT (a, i);
1564 COPY_HARD_REG_SET (conflict_regs[i],
1565 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1567 if (retry_p)
1569 COPY_HARD_REG_SET (*start_profitable_regs,
1570 reg_class_contents[ALLOCNO_CLASS (a)]);
1571 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1572 ira_prohibited_class_mode_regs
1573 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1575 else
1576 COPY_HARD_REG_SET (*start_profitable_regs,
1577 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
1580 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1581 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1582 static inline bool
1583 check_hard_reg_p (ira_allocno_t a, int hard_regno,
1584 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1586 int j, nwords, nregs;
1587 enum reg_class aclass;
1588 machine_mode mode;
1590 aclass = ALLOCNO_CLASS (a);
1591 mode = ALLOCNO_MODE (a);
1592 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
1593 hard_regno))
1594 return false;
1595 /* Checking only profitable hard regs. */
1596 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1597 return false;
1598 nregs = hard_regno_nregs[hard_regno][mode];
1599 nwords = ALLOCNO_NUM_OBJECTS (a);
/* Check each hard register of the group against the conflict sets of
   A's objects.  With one object per register word only the matching
   word's conflict set applies (REG_WORDS_BIG_ENDIAN picks the word);
   otherwise every object's conflict set must be free of the reg.  */
1600 for (j = 0; j < nregs; j++)
1602 int k;
1603 int set_to_test_start = 0, set_to_test_end = nwords;
1605 if (nregs == nwords)
1607 if (REG_WORDS_BIG_ENDIAN)
1608 set_to_test_start = nwords - j - 1;
1609 else
1610 set_to_test_start = j;
1611 set_to_test_end = set_to_test_start + 1;
1613 for (k = set_to_test_start; k < set_to_test_end; k++)
1614 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
1615 break;
/* Inner loop broke early => a conflict was found => stop.  */
1616 if (k != set_to_test_end)
1617 break;
/* True iff no conflict was found for any register of the group.  */
1619 return j == nregs;
1622 /* Return number of registers needed to be saved and restored at
1623 function prologue/epilogue if we allocate HARD_REGNO to hold value
1624 of MODE. */
1625 static int
1626 calculate_saved_nregs (int hard_regno, machine_mode mode)
1628 int i;
1629 int nregs = 0;
1631 ira_assert (hard_regno >= 0);
1632 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1633 if (!allocated_hardreg_p[hard_regno + i]
1634 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1635 && !LOCAL_REGNO (hard_regno + i))
1636 nregs++;
1637 return nregs;
1640 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1641 that the function called from function
1642 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1643 this case some allocno data are not defined or updated and we
1644 should not touch these data. The function returns true if we
1645 managed to assign a hard register to the allocno.
1647 To assign a hard register, first of all we calculate all conflict
1648 hard registers which can come from conflicting allocnos with
1649 already assigned hard registers. After that we find first free
1650 hard register with the minimal cost. During hard register cost
1651 calculation we take conflict hard register costs into account to
1652 give a chance for conflicting allocnos to get a better hard
1653 register in the future.
1655 If the best hard register cost is bigger than cost of memory usage
1656 for the allocno, we don't assign a hard register to given allocno
1657 at all.
1659 If we assign a hard register to the allocno, we update costs of the
1660 hard register for allocnos connected by copies to improve a chance
1661 to coalesce insns represented by the copies when we assign hard
1662 registers to the allocnos connected by the copies. */
1663 static bool
1664 assign_hard_reg (ira_allocno_t a, bool retry_p)
/* conflicting_regs has two slots — presumably an allocno has at most
   two objects here; verify against ALLOCNO_NUM_OBJECTS invariants.  */
1666 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1667 int i, j, hard_regno, best_hard_regno, class_size;
1668 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1669 int *a_costs;
1670 enum reg_class aclass;
1671 machine_mode mode;
1672 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
1673 int saved_nregs;
1674 enum reg_class rclass;
1675 int add_cost;
1676 #ifdef STACK_REGS
1677 bool no_stack_reg_p;
1678 #endif
1680 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1681 get_conflict_and_start_profitable_regs (a, retry_p,
1682 conflicting_regs,
1683 &profitable_hard_regs);
1684 aclass = ALLOCNO_CLASS (a);
1685 class_size = ira_class_hard_regs_num[aclass];
1686 best_hard_regno = -1;
/* NOTE(review): full_costs is cleared twice (here and two lines
   below); the first memset is redundant but harmless.  */
1687 memset (full_costs, 0, sizeof (int) * class_size);
1688 mem_cost = 0;
1689 memset (costs, 0, sizeof (int) * class_size);
1690 memset (full_costs, 0, sizeof (int) * class_size);
1691 #ifdef STACK_REGS
1692 no_stack_reg_p = false;
1693 #endif
1694 if (! retry_p)
1695 start_update_cost ();
1696 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1698 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1699 aclass, ALLOCNO_HARD_REG_COSTS (a));
1700 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
1701 #ifdef STACK_REGS
1702 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
1703 #endif
1704 cost = ALLOCNO_UPDATED_CLASS_COST (a);
/* Seed per-register costs either from the individual cost vector or
   uniformly from the class cost.  */
1705 for (i = 0; i < class_size; i++)
1706 if (a_costs != NULL)
1708 costs[i] += a_costs[i];
1709 full_costs[i] += a_costs[i];
1711 else
1713 costs[i] += cost;
1714 full_costs[i] += cost;
1716 nwords = ALLOCNO_NUM_OBJECTS (a);
1717 curr_allocno_process++;
/* Scan conflicts of each of A's objects: accumulate forbidden hard
   regs from assigned conflicting allocnos and subtract conflict
   costs of unassigned ones.  */
1718 for (word = 0; word < nwords; word++)
1720 ira_object_t conflict_obj;
1721 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1722 ira_object_conflict_iterator oci;
1724 /* Take preferences of conflicting allocnos into account. */
1725 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1727 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1728 enum reg_class conflict_aclass;
1729 allocno_color_data_t data = ALLOCNO_COLOR_DATA (conflict_a);
1731 /* Reload can give another class so we need to check all
1732 allocnos. */
1733 if (!retry_p
1734 && ((!ALLOCNO_ASSIGNED_P (conflict_a)
1735 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1736 && !(hard_reg_set_intersect_p
1737 (profitable_hard_regs,
1738 ALLOCNO_COLOR_DATA
1739 (conflict_a)->profitable_hard_regs))))
1741 /* All conflict allocnos are in consideration bitmap
1742 when retry_p is false. It might change in future and
1743 if it happens the assert will be broken. It means
1744 the code should be modified for the new
1745 assumptions. */
1746 ira_assert (bitmap_bit_p (consideration_allocno_bitmap,
1747 ALLOCNO_NUM (conflict_a)));
1748 continue;
1750 conflict_aclass = ALLOCNO_CLASS (conflict_a);
1751 ira_assert (ira_reg_classes_intersect_p
1752 [aclass][conflict_aclass]);
1753 if (ALLOCNO_ASSIGNED_P (conflict_a))
1755 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1756 if (hard_regno >= 0
1757 && (ira_hard_reg_set_intersection_p
1758 (hard_regno, ALLOCNO_MODE (conflict_a),
1759 reg_class_contents[aclass])))
1761 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
1762 int conflict_nregs;
1764 mode = ALLOCNO_MODE (conflict_a);
1765 conflict_nregs = hard_regno_nregs[hard_regno][mode];
/* One object per register word: only the overlapping word's hard
   register is forbidden; REG_WORDS_BIG_ENDIAN selects it.  */
1766 if (conflict_nregs == n_objects && conflict_nregs > 1)
1768 int num = OBJECT_SUBWORD (conflict_obj);
1770 if (REG_WORDS_BIG_ENDIAN)
1771 SET_HARD_REG_BIT (conflicting_regs[word],
1772 hard_regno + n_objects - num - 1);
1773 else
1774 SET_HARD_REG_BIT (conflicting_regs[word],
1775 hard_regno + num);
1777 else
1778 IOR_HARD_REG_SET
1779 (conflicting_regs[word],
1780 ira_reg_mode_hard_regset[hard_regno][mode]);
/* Every profitable register is already blocked — give up early.  */
1781 if (hard_reg_set_subset_p (profitable_hard_regs,
1782 conflicting_regs[word]))
1783 goto fail;
1786 else if (! retry_p
1787 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1788 /* Don't process the conflict allocno twice. */
1789 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1790 != curr_allocno_process))
1792 int k, *conflict_costs;
1794 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1795 = curr_allocno_process;
1796 ira_allocate_and_copy_costs
1797 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1798 conflict_aclass,
1799 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1800 conflict_costs
1801 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1802 if (conflict_costs != NULL)
1803 for (j = class_size - 1; j >= 0; j--)
1805 hard_regno = ira_class_hard_regs[aclass][j];
1806 ira_assert (hard_regno >= 0);
1807 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
1808 if (k < 0
1809 /* If HARD_REGNO is not available for CONFLICT_A,
1810 the conflict would be ignored, since HARD_REGNO
1811 will never be assigned to CONFLICT_A. */
1812 || !TEST_HARD_REG_BIT (data->profitable_hard_regs,
1813 hard_regno))
1814 continue;
1815 full_costs[j] -= conflict_costs[k];
1817 queue_update_cost (conflict_a, NULL, COST_HOP_DIVISOR);
1822 if (! retry_p)
1823 /* Take into account preferences of allocnos connected by copies to
1824 the conflict allocnos. */
1825 update_conflict_hard_regno_costs (full_costs, aclass, true);
1827 /* Take preferences of allocnos connected by copies into
1828 account. */
1829 if (! retry_p)
1831 start_update_cost ();
1832 queue_update_cost (a, NULL, COST_HOP_DIVISOR);
1833 update_conflict_hard_regno_costs (full_costs, aclass, false);
1835 min_cost = min_full_cost = INT_MAX;
1836 /* We don't care about giving callee saved registers to allocnos no
1837 living through calls because call clobbered registers are
1838 allocated first (it is usual practice to put them first in
1839 REG_ALLOC_ORDER). */
1840 mode = ALLOCNO_MODE (a);
/* Candidate scan: pick the free profitable hard register with the
   minimal full cost.  */
1841 for (i = 0; i < class_size; i++)
1843 hard_regno = ira_class_hard_regs[aclass][i];
1844 #ifdef STACK_REGS
1845 if (no_stack_reg_p
1846 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1847 continue;
1848 #endif
1849 if (! check_hard_reg_p (a, hard_regno,
1850 conflicting_regs, profitable_hard_regs))
1851 continue;
1852 cost = costs[i];
1853 full_cost = full_costs[i];
/* Unless the target dictates allocation order, penalize registers
   that would need a prologue/epilogue save/restore.  */
1854 if (!HONOR_REG_ALLOC_ORDER)
1856 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1857 /* We need to save/restore the hard register in
1858 epilogue/prologue. Therefore we increase the cost. */
1860 rclass = REGNO_REG_CLASS (hard_regno);
1861 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1862 + ira_memory_move_cost[mode][rclass][1])
1863 * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
1864 cost += add_cost;
1865 full_cost += add_cost;
1868 if (min_cost > cost)
1869 min_cost = cost;
1870 if (min_full_cost > full_cost)
1872 min_full_cost = full_cost;
1873 best_hard_regno = hard_regno;
1874 ira_assert (hard_regno >= 0);
/* If even the best register costs more than spilling to memory,
   leave A unassigned.  */
1877 if (min_full_cost > mem_cost
1878 /* Do not spill static chain pointer pseudo when non-local goto
1879 is used. */
1880 && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
1882 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1883 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1884 mem_cost, min_full_cost);
1885 best_hard_regno = -1;
/* Reached directly when all profitable regs are blocked by assigned
   conflicts; best_hard_regno is still -1 in that case.  */
1887 fail:
1888 if (best_hard_regno >= 0)
1890 for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
1891 allocated_hardreg_p[best_hard_regno + i] = true;
1893 if (! retry_p)
1894 restore_costs_from_copies (a);
1895 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1896 ALLOCNO_ASSIGNED_P (a) = true;
1897 if (best_hard_regno >= 0)
1898 update_costs_from_copies (a, true, ! retry_p);
1899 ira_assert (ALLOCNO_CLASS (a) == aclass);
1900 /* We don't need updated costs anymore. */
1901 ira_free_allocno_updated_costs (a);
1902 return best_hard_regno >= 0;
1907 /* An array used to sort copies. */
/* Ordered by copy_freq_compare_func (decreasing frequency) in
   form_threads_from_copies.  */
1908 static ira_copy_t *sorted_copies;
1910 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1911 used to find a conflict for new allocnos or allocnos with the
1912 different allocno classes. */
1913 static bool
1914 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
1916 rtx reg1, reg2;
1917 int i, j;
1918 int n1 = ALLOCNO_NUM_OBJECTS (a1);
1919 int n2 = ALLOCNO_NUM_OBJECTS (a2);
1921 if (a1 == a2)
1922 return false;
1923 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
1924 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
1925 if (reg1 != NULL && reg2 != NULL
1926 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
1927 return false;
1929 for (i = 0; i < n1; i++)
1931 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
1933 for (j = 0; j < n2; j++)
1935 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
1937 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
1938 OBJECT_LIVE_RANGES (c2)))
1939 return true;
1942 return false;
1945 /* The function is used to sort copies according to their execution
1946 frequencies. */
1947 static int
1948 copy_freq_compare_func (const void *v1p, const void *v2p)
1950 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1951 int pri1, pri2;
1953 pri1 = cp1->freq;
1954 pri2 = cp2->freq;
1955 if (pri2 - pri1)
1956 return pri2 - pri1;
1958 /* If frequencies are equal, sort by copies, so that the results of
1959 qsort leave nothing to chance. */
1960 return cp1->num - cp2->num;
1965 /* Return true if any allocno from thread of A1 conflicts with any
1966 allocno from thread A2. */
1967 static bool
1968 allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
1970 ira_allocno_t a, conflict_a;
/* Thread members are linked circularly through next_thread_allocno
   (see merge_threads); starting at the head's successor and stopping
   once the head is reached visits every member exactly once.  */
1972 for (a = ALLOCNO_COLOR_DATA (a2)->next_thread_allocno;;
1973 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
1975 for (conflict_a = ALLOCNO_COLOR_DATA (a1)->next_thread_allocno;;
1976 conflict_a = ALLOCNO_COLOR_DATA (conflict_a)->next_thread_allocno)
1978 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
1979 return true;
1980 if (conflict_a == a1)
1981 break;
1983 if (a == a2)
1984 break;
1986 return false;
1989 /* Merge two threads given correspondingly by their first allocnos T1
1990 and T2 (more accurately merging T2 into T1). */
1991 static void
1992 merge_threads (ira_allocno_t t1, ira_allocno_t t2)
1994 ira_allocno_t a, next, last;
1996 gcc_assert (t1 != t2
1997 && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
1998 && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
1999 for (last = t2, a = ALLOCNO_COLOR_DATA (t2)->next_thread_allocno;;
2000 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2002 ALLOCNO_COLOR_DATA (a)->first_thread_allocno = t1;
2003 if (a == t2)
2004 break;
2005 last = a;
2007 next = ALLOCNO_COLOR_DATA (t1)->next_thread_allocno;
2008 ALLOCNO_COLOR_DATA (t1)->next_thread_allocno = t2;
2009 ALLOCNO_COLOR_DATA (last)->next_thread_allocno = next;
2010 ALLOCNO_COLOR_DATA (t1)->thread_freq += ALLOCNO_COLOR_DATA (t2)->thread_freq;
2013 /* Create threads by processing CP_NUM copies from sorted copies. We
2014 process the most expensive copies first. */
2015 static void
2016 form_threads_from_copies (int cp_num)
2018 ira_allocno_t a, thread1, thread2;
2019 ira_copy_t cp;
2020 int i, n;
2022 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
2023 /* Form threads processing copies, most frequently executed
2024 first. */
2025 for (; cp_num != 0;)
2027 for (i = 0; i < cp_num; i++)
2029 cp = sorted_copies[i];
2030 thread1 = ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno;
2031 thread2 = ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno;
2032 if (thread1 == thread2)
2033 continue;
2034 if (! allocno_thread_conflict_p (thread1, thread2))
2036 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2037 fprintf
2038 (ira_dump_file,
2039 " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2040 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
2041 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
2042 cp->freq);
2043 merge_threads (thread1, thread2);
2044 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2046 thread1 = ALLOCNO_COLOR_DATA (thread1)->first_thread_allocno;
2047 fprintf (ira_dump_file, " Result (freq=%d): a%dr%d(%d)",
2048 ALLOCNO_COLOR_DATA (thread1)->thread_freq,
2049 ALLOCNO_NUM (thread1), ALLOCNO_REGNO (thread1),
2050 ALLOCNO_FREQ (thread1));
2051 for (a = ALLOCNO_COLOR_DATA (thread1)->next_thread_allocno;
2052 a != thread1;
2053 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2054 fprintf (ira_dump_file, " a%dr%d(%d)",
2055 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2056 ALLOCNO_FREQ (a));
2057 fprintf (ira_dump_file, "\n");
2059 i++;
2060 break;
2063 /* Collect the rest of copies. */
2064 for (n = 0; i < cp_num; i++)
2066 cp = sorted_copies[i];
2067 if (ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno
2068 != ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno)
2069 sorted_copies[n++] = cp;
2071 cp_num = n;
2075 /* Create threads by processing copies of all alocnos from BUCKET. We
2076 process the most expensive copies first. */
2077 static void
2078 form_threads_from_bucket (ira_allocno_t bucket)
2080 ira_allocno_t a;
2081 ira_copy_t cp, next_cp;
2082 int cp_num = 0;
2084 for (a = bucket; a != NULL; a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2086 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2088 if (cp->first == a)
2090 next_cp = cp->next_first_allocno_copy;
2091 sorted_copies[cp_num++] = cp;
2093 else if (cp->second == a)
2094 next_cp = cp->next_second_allocno_copy;
2095 else
2096 gcc_unreachable ();
2099 form_threads_from_copies (cp_num);
2102 /* Create threads by processing copies of colorable allocno A. We
2103 process most expensive copies first. */
2104 static void
2105 form_threads_from_colorable_allocno (ira_allocno_t a)
2107 ira_allocno_t another_a;
2108 ira_copy_t cp, next_cp;
2109 int cp_num = 0;
2111 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2113 if (cp->first == a)
2115 next_cp = cp->next_first_allocno_copy;
2116 another_a = cp->second;
2118 else if (cp->second == a)
2120 next_cp = cp->next_second_allocno_copy;
2121 another_a = cp->first;
2123 else
2124 gcc_unreachable ();
2125 if ((! ALLOCNO_COLOR_DATA (another_a)->in_graph_p
2126 && !ALLOCNO_COLOR_DATA (another_a)->may_be_spilled_p)
2127 || ALLOCNO_COLOR_DATA (another_a)->colorable_p)
2128 sorted_copies[cp_num++] = cp;
2130 form_threads_from_copies (cp_num);
2133 /* Form initial threads which contain only one allocno. */
2134 static void
2135 init_allocno_threads (void)
2137 ira_allocno_t a;
2138 unsigned int j;
2139 bitmap_iterator bi;
2141 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2143 a = ira_allocnos[j];
2144 /* Set up initial thread data: */
2145 ALLOCNO_COLOR_DATA (a)->first_thread_allocno
2146 = ALLOCNO_COLOR_DATA (a)->next_thread_allocno = a;
2147 ALLOCNO_COLOR_DATA (a)->thread_freq = ALLOCNO_FREQ (a);
2153 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
2155 /* Bucket of allocnos that can colored currently without spilling. */
2156 static ira_allocno_t colorable_allocno_bucket;
2158 /* Bucket of allocnos that might be not colored currently without
2159 spilling. */
2160 static ira_allocno_t uncolorable_allocno_bucket;
2162 /* The current number of allocnos in the uncolorable_bucket. */
2163 static int uncolorable_allocnos_num;
2165 /* Return the current spill priority of allocno A. The less the
2166 number, the more preferable the allocno for spilling. */
2167 static inline int
2168 allocno_spill_priority (ira_allocno_t a)
2170 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
2172 return (data->temp
2173 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2174 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
2175 + 1));
2178 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
2179 before the call. */
2180 static void
2181 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
2183 ira_allocno_t first_a;
2184 allocno_color_data_t data;
2186 if (bucket_ptr == &uncolorable_allocno_bucket
2187 && ALLOCNO_CLASS (a) != NO_REGS)
2189 uncolorable_allocnos_num++;
2190 ira_assert (uncolorable_allocnos_num > 0);
2192 first_a = *bucket_ptr;
2193 data = ALLOCNO_COLOR_DATA (a);
2194 data->next_bucket_allocno = first_a;
2195 data->prev_bucket_allocno = NULL;
2196 if (first_a != NULL)
2197 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
2198 *bucket_ptr = a;
2201 /* Compare two allocnos to define which allocno should be pushed first
2202 into the coloring stack. If the return is a negative number, the
2203 allocno given by the first parameter will be pushed first. In this
2204 case such allocno has less priority than the second one and the
2205 hard register will be assigned to it after assignment to the second
2206 one. As the result of such assignment order, the second allocno
2207 has a better chance to get the best hard register. */
2208 static int
2209 bucket_allocno_compare_func (const void *v1p, const void *v2p)
2211 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2212 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2213 int diff, freq1, freq2, a1_num, a2_num;
2214 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)->first_thread_allocno;
2215 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)->first_thread_allocno;
2216 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
2218 freq1 = ALLOCNO_COLOR_DATA (t1)->thread_freq;
2219 freq2 = ALLOCNO_COLOR_DATA (t2)->thread_freq;
2220 if ((diff = freq1 - freq2) != 0)
2221 return diff;
2223 if ((diff = ALLOCNO_NUM (t2) - ALLOCNO_NUM (t1)) != 0)
2224 return diff;
2226 /* Push pseudos requiring less hard registers first. It means that
2227 we will assign pseudos requiring more hard registers first
2228 avoiding creation small holes in free hard register file into
2229 which the pseudos requiring more hard registers can not fit. */
2230 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
2231 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
2232 return diff;
2234 freq1 = ALLOCNO_FREQ (a1);
2235 freq2 = ALLOCNO_FREQ (a2);
2236 if ((diff = freq1 - freq2) != 0)
2237 return diff;
2239 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
2240 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
2241 if ((diff = a2_num - a1_num) != 0)
2242 return diff;
2243 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2246 /* Sort bucket *BUCKET_PTR and return the result through
2247 BUCKET_PTR. */
2248 static void
2249 sort_bucket (ira_allocno_t *bucket_ptr,
2250 int (*compare_func) (const void *, const void *))
2252 ira_allocno_t a, head;
2253 int n;
2255 for (n = 0, a = *bucket_ptr;
2256 a != NULL;
2257 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2258 sorted_allocnos[n++] = a;
2259 if (n <= 1)
2260 return;
2261 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
2262 head = NULL;
2263 for (n--; n >= 0; n--)
2265 a = sorted_allocnos[n];
2266 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
2267 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
2268 if (head != NULL)
2269 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
2270 head = a;
2272 *bucket_ptr = head;
2275 /* Add ALLOCNO to colorable bucket maintaining the order according
2276 their priority. ALLOCNO should be not in a bucket before the
2277 call. */
2278 static void
2279 add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
2281 ira_allocno_t before, after;
2283 form_threads_from_colorable_allocno (allocno);
2284 for (before = colorable_allocno_bucket, after = NULL;
2285 before != NULL;
2286 after = before,
2287 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
2288 if (bucket_allocno_compare_func (&allocno, &before) < 0)
2289 break;
2290 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
2291 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
2292 if (after == NULL)
2293 colorable_allocno_bucket = allocno;
2294 else
2295 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
2296 if (before != NULL)
2297 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
2300 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
2301 the call. */
2302 static void
2303 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2305 ira_allocno_t prev_allocno, next_allocno;
2307 if (bucket_ptr == &uncolorable_allocno_bucket
2308 && ALLOCNO_CLASS (allocno) != NO_REGS)
2310 uncolorable_allocnos_num--;
2311 ira_assert (uncolorable_allocnos_num >= 0);
2313 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
2314 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
2315 if (prev_allocno != NULL)
2316 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
2317 else
2319 ira_assert (*bucket_ptr == allocno);
2320 *bucket_ptr = next_allocno;
2322 if (next_allocno != NULL)
2323 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
2326 /* Put allocno A onto the coloring stack without removing it from its
2327 bucket. Pushing allocno to the coloring stack can result in moving
2328 conflicting allocnos from the uncolorable bucket to the colorable
2329 one. */
2330 static void
2331 push_allocno_to_stack (ira_allocno_t a)
2333 enum reg_class aclass;
2334 allocno_color_data_t data, conflict_data;
2335 int size, i, n = ALLOCNO_NUM_OBJECTS (a);
2337 data = ALLOCNO_COLOR_DATA (a);
2338 data->in_graph_p = false;
2339 allocno_stack_vec.safe_push (a);
2340 aclass = ALLOCNO_CLASS (a);
2341 if (aclass == NO_REGS)
2342 return;
2343 size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2344 if (n > 1)
2346 /* We will deal with the subwords individually. */
2347 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
2348 size = 1;
2350 for (i = 0; i < n; i++)
2352 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2353 ira_object_t conflict_obj;
2354 ira_object_conflict_iterator oci;
2356 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2358 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2360 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
2361 if (conflict_data->colorable_p
2362 || ! conflict_data->in_graph_p
2363 || ALLOCNO_ASSIGNED_P (conflict_a)
2364 || !(hard_reg_set_intersect_p
2365 (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
2366 conflict_data->profitable_hard_regs)))
2367 continue;
2368 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
2369 ALLOCNO_NUM (conflict_a)));
2370 if (update_left_conflict_sizes_p (conflict_a, a, size))
2372 delete_allocno_from_bucket
2373 (conflict_a, &uncolorable_allocno_bucket);
2374 add_allocno_to_ordered_colorable_bucket (conflict_a);
2375 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2377 fprintf (ira_dump_file, " Making");
2378 ira_print_expanded_allocno (conflict_a);
2379 fprintf (ira_dump_file, " colorable\n");
2387 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2388 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
2389 static void
2390 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2392 if (colorable_p)
2393 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2394 else
2395 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2396 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2398 fprintf (ira_dump_file, " Pushing");
2399 ira_print_expanded_allocno (allocno);
2400 if (colorable_p)
2401 fprintf (ira_dump_file, "(cost %d)\n",
2402 ALLOCNO_COLOR_DATA (allocno)->temp);
2403 else
2404 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2405 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
2406 allocno_spill_priority (allocno),
2407 ALLOCNO_COLOR_DATA (allocno)->temp);
2409 if (! colorable_p)
2410 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
2411 push_allocno_to_stack (allocno);
2414 /* Put all allocnos from colorable bucket onto the coloring stack. */
2415 static void
2416 push_only_colorable (void)
2418 form_threads_from_bucket (colorable_allocno_bucket);
2419 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
2420 for (;colorable_allocno_bucket != NULL;)
2421 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
2424 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
2425 loop given by its LOOP_NODE. */
2427 ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
2429 int freq, i;
2430 edge_iterator ei;
2431 edge e;
2432 vec<edge> edges;
2434 ira_assert (current_loops != NULL && loop_node->loop != NULL
2435 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
2436 freq = 0;
2437 if (! exit_p)
2439 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2440 if (e->src != loop_node->loop->latch
2441 && (regno < 0
2442 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2443 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
2444 freq += EDGE_FREQUENCY (e);
2446 else
2448 edges = get_loop_exit_edges (loop_node->loop);
2449 FOR_EACH_VEC_ELT (edges, i, e)
2450 if (regno < 0
2451 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2452 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
2453 freq += EDGE_FREQUENCY (e);
2454 edges.release ();
2457 return REG_FREQ_FROM_EDGE_FREQ (freq);
2460 /* Calculate and return the cost of putting allocno A into memory. */
2461 static int
2462 calculate_allocno_spill_cost (ira_allocno_t a)
2464 int regno, cost;
2465 machine_mode mode;
2466 enum reg_class rclass;
2467 ira_allocno_t parent_allocno;
2468 ira_loop_tree_node_t parent_node, loop_node;
2470 regno = ALLOCNO_REGNO (a);
2471 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
2472 if (ALLOCNO_CAP (a) != NULL)
2473 return cost;
2474 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2475 if ((parent_node = loop_node->parent) == NULL)
2476 return cost;
2477 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
2478 return cost;
2479 mode = ALLOCNO_MODE (a);
2480 rclass = ALLOCNO_CLASS (a);
2481 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
2482 cost -= (ira_memory_move_cost[mode][rclass][0]
2483 * ira_loop_edge_freq (loop_node, regno, true)
2484 + ira_memory_move_cost[mode][rclass][1]
2485 * ira_loop_edge_freq (loop_node, regno, false));
2486 else
2488 ira_init_register_move_cost_if_necessary (mode);
2489 cost += ((ira_memory_move_cost[mode][rclass][1]
2490 * ira_loop_edge_freq (loop_node, regno, true)
2491 + ira_memory_move_cost[mode][rclass][0]
2492 * ira_loop_edge_freq (loop_node, regno, false))
2493 - (ira_register_move_cost[mode][rclass][rclass]
2494 * (ira_loop_edge_freq (loop_node, regno, false)
2495 + ira_loop_edge_freq (loop_node, regno, true))));
2497 return cost;
2500 /* Used for sorting allocnos for spilling. */
2501 static inline int
2502 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2504 int pri1, pri2, diff;
2506 /* Avoid spilling static chain pointer pseudo when non-local goto is
2507 used. */
2508 if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)))
2509 return 1;
2510 else if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)))
2511 return -1;
2512 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2513 return 1;
2514 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2515 return -1;
2516 pri1 = allocno_spill_priority (a1);
2517 pri2 = allocno_spill_priority (a2);
2518 if ((diff = pri1 - pri2) != 0)
2519 return diff;
2520 if ((diff
2521 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2522 return diff;
2523 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2526 /* Used for sorting allocnos for spilling. */
2527 static int
2528 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2530 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2531 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2533 return allocno_spill_priority_compare (p1, p2);
2536 /* Push allocnos to the coloring stack. The order of allocnos in the
2537 stack defines the order for the subsequent coloring. */
2538 static void
2539 push_allocnos_to_stack (void)
2541 ira_allocno_t a;
2542 int cost;
2544 /* Calculate uncolorable allocno spill costs. */
2545 for (a = uncolorable_allocno_bucket;
2546 a != NULL;
2547 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2548 if (ALLOCNO_CLASS (a) != NO_REGS)
2550 cost = calculate_allocno_spill_cost (a);
2551 /* ??? Remove cost of copies between the coalesced
2552 allocnos. */
2553 ALLOCNO_COLOR_DATA (a)->temp = cost;
2555 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
2556 for (;;)
2558 push_only_colorable ();
2559 a = uncolorable_allocno_bucket;
2560 if (a == NULL)
2561 break;
2562 remove_allocno_from_bucket_and_push (a, false);
2564 ira_assert (colorable_allocno_bucket == NULL
2565 && uncolorable_allocno_bucket == NULL);
2566 ira_assert (uncolorable_allocnos_num == 0);
2569 /* Pop the coloring stack and assign hard registers to the popped
2570 allocnos. */
2571 static void
2572 pop_allocnos_from_stack (void)
2574 ira_allocno_t allocno;
2575 enum reg_class aclass;
2577 for (;allocno_stack_vec.length () != 0;)
2579 allocno = allocno_stack_vec.pop ();
2580 aclass = ALLOCNO_CLASS (allocno);
2581 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2583 fprintf (ira_dump_file, " Popping");
2584 ira_print_expanded_allocno (allocno);
2585 fprintf (ira_dump_file, " -- ");
2587 if (aclass == NO_REGS)
2589 ALLOCNO_HARD_REGNO (allocno) = -1;
2590 ALLOCNO_ASSIGNED_P (allocno) = true;
2591 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2592 ira_assert
2593 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2594 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2595 fprintf (ira_dump_file, "assign memory\n");
2597 else if (assign_hard_reg (allocno, false))
2599 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2600 fprintf (ira_dump_file, "assign reg %d\n",
2601 ALLOCNO_HARD_REGNO (allocno));
2603 else if (ALLOCNO_ASSIGNED_P (allocno))
2605 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2606 fprintf (ira_dump_file, "spill%s\n",
2607 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p
2608 ? "" : "!");
2610 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2614 /* Set up number of available hard registers for allocno A. */
2615 static void
2616 setup_allocno_available_regs_num (ira_allocno_t a)
2618 int i, n, hard_regno, hard_regs_num, nwords;
2619 enum reg_class aclass;
2620 allocno_color_data_t data;
2622 aclass = ALLOCNO_CLASS (a);
2623 data = ALLOCNO_COLOR_DATA (a);
2624 data->available_regs_num = 0;
2625 if (aclass == NO_REGS)
2626 return;
2627 hard_regs_num = ira_class_hard_regs_num[aclass];
2628 nwords = ALLOCNO_NUM_OBJECTS (a);
2629 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
2631 hard_regno = ira_class_hard_regs[aclass][i];
2632 /* Checking only profitable hard regs. */
2633 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
2634 n++;
2636 data->available_regs_num = n;
2637 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
2638 return;
2639 fprintf
2640 (ira_dump_file,
2641 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2642 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2643 reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
2644 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2645 fprintf (ira_dump_file, ", %snode: ",
2646 hard_reg_set_equal_p (data->profitable_hard_regs,
2647 data->hard_regs_node->hard_regs->set)
2648 ? "" : "^");
2649 print_hard_reg_set (ira_dump_file,
2650 data->hard_regs_node->hard_regs->set, false);
2651 for (i = 0; i < nwords; i++)
2653 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2655 if (nwords != 1)
2657 if (i != 0)
2658 fprintf (ira_dump_file, ", ");
2659 fprintf (ira_dump_file, " obj %d", i);
2661 fprintf (ira_dump_file, " (confl regs = ");
2662 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2663 false);
2664 fprintf (ira_dump_file, ")");
2666 fprintf (ira_dump_file, "\n");
2669 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2670 conflicting allocnos and hard registers. */
2671 static void
2672 put_allocno_into_bucket (ira_allocno_t allocno)
2674 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2675 setup_allocno_available_regs_num (allocno);
2676 if (setup_left_conflict_sizes_p (allocno))
2677 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2678 else
2679 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
/* Map: allocno number -> allocno priority.  */
static int *allocno_priorities;
2685 /* Set up priorities for N allocnos in array
2686 CONSIDERATION_ALLOCNOS. */
2687 static void
2688 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
2690 int i, length, nrefs, priority, max_priority, mult;
2691 ira_allocno_t a;
2693 max_priority = 0;
2694 for (i = 0; i < n; i++)
2696 a = consideration_allocnos[i];
2697 nrefs = ALLOCNO_NREFS (a);
2698 ira_assert (nrefs >= 0);
2699 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2700 ira_assert (mult >= 0);
2701 allocno_priorities[ALLOCNO_NUM (a)]
2702 = priority
2703 = (mult
2704 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
2705 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
2706 if (priority < 0)
2707 priority = -priority;
2708 if (max_priority < priority)
2709 max_priority = priority;
2711 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2712 for (i = 0; i < n; i++)
2714 a = consideration_allocnos[i];
2715 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2716 if (ALLOCNO_NUM_OBJECTS (a) > 1)
2717 length /= ALLOCNO_NUM_OBJECTS (a);
2718 if (length <= 0)
2719 length = 1;
2720 allocno_priorities[ALLOCNO_NUM (a)]
2721 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2725 /* Sort allocnos according to the profit of usage of a hard register
2726 instead of memory for them. */
2727 static int
2728 allocno_cost_compare_func (const void *v1p, const void *v2p)
2730 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2731 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2732 int c1, c2;
2734 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2735 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2736 if (c1 - c2)
2737 return c1 - c2;
2739 /* If regs are equally good, sort by allocno numbers, so that the
2740 results of qsort leave nothing to chance. */
2741 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2744 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2745 possible to hard registers. Let us try to improve allocation with
2746 cost point of view. This function improves the allocation by
2747 spilling some allocnos and assigning the freed hard registers to
2748 other allocnos if it decreases the overall allocation cost. */
2749 static void
2750 improve_allocation (void)
2752 unsigned int i;
2753 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
2754 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
2755 bool try_p;
2756 enum reg_class aclass;
2757 machine_mode mode;
2758 int *allocno_costs;
2759 int costs[FIRST_PSEUDO_REGISTER];
2760 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
2761 ira_allocno_t a;
2762 bitmap_iterator bi;
2764 /* Don't bother to optimize the code with static chain pointer and
2765 non-local goto in order not to spill the chain pointer
2766 pseudo. */
2767 if (cfun->static_chain_decl && crtl->has_nonlocal_goto)
2768 return;
2769 /* Clear counts used to process conflicting allocnos only once for
2770 each allocno. */
2771 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2772 ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
2773 check = n = 0;
2774 /* Process each allocno and try to assign a hard register to it by
2775 spilling some its conflicting allocnos. */
2776 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2778 a = ira_allocnos[i];
2779 ALLOCNO_COLOR_DATA (a)->temp = 0;
2780 if (empty_profitable_hard_regs (a))
2781 continue;
2782 check++;
2783 aclass = ALLOCNO_CLASS (a);
2784 allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
2785 if (allocno_costs == NULL)
2786 allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
2787 if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
2788 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
2789 else if (allocno_costs == NULL)
2790 /* It means that assigning a hard register is not profitable
2791 (we don't waste memory for hard register costs in this
2792 case). */
2793 continue;
2794 else
2795 base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
2796 try_p = false;
2797 get_conflict_and_start_profitable_regs (a, false,
2798 conflicting_regs,
2799 &profitable_hard_regs);
2800 class_size = ira_class_hard_regs_num[aclass];
2801 /* Set up cost improvement for usage of each profitable hard
2802 register for allocno A. */
2803 for (j = 0; j < class_size; j++)
2805 hregno = ira_class_hard_regs[aclass][j];
2806 if (! check_hard_reg_p (a, hregno,
2807 conflicting_regs, profitable_hard_regs))
2808 continue;
2809 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
2810 k = allocno_costs == NULL ? 0 : j;
2811 costs[hregno] = (allocno_costs == NULL
2812 ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
2813 costs[hregno] -= base_cost;
2814 if (costs[hregno] < 0)
2815 try_p = true;
2817 if (! try_p)
2818 /* There is no chance to improve the allocation cost by
2819 assigning hard register to allocno A even without spilling
2820 conflicting allocnos. */
2821 continue;
2822 mode = ALLOCNO_MODE (a);
2823 nwords = ALLOCNO_NUM_OBJECTS (a);
2824 /* Process each allocno conflicting with A and update the cost
2825 improvement for profitable hard registers of A. To use a
2826 hard register for A we need to spill some conflicting
2827 allocnos and that creates penalty for the cost
2828 improvement. */
2829 for (word = 0; word < nwords; word++)
2831 ira_object_t conflict_obj;
2832 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2833 ira_object_conflict_iterator oci;
2835 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2837 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2839 if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
2840 /* We already processed this conflicting allocno
2841 because we processed earlier another object of the
2842 conflicting allocno. */
2843 continue;
2844 ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
2845 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2846 continue;
2847 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
2848 k = (ira_class_hard_reg_index
2849 [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
2850 ira_assert (k >= 0);
2851 if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
2852 != NULL)
2853 spill_cost -= allocno_costs[k];
2854 else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
2855 != NULL)
2856 spill_cost -= allocno_costs[k];
2857 else
2858 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
2859 conflict_nregs
2860 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2861 for (r = conflict_hregno;
2862 r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
2863 r--)
2864 if (check_hard_reg_p (a, r,
2865 conflicting_regs, profitable_hard_regs))
2866 costs[r] += spill_cost;
2867 for (r = conflict_hregno + 1;
2868 r < conflict_hregno + conflict_nregs;
2869 r++)
2870 if (check_hard_reg_p (a, r,
2871 conflicting_regs, profitable_hard_regs))
2872 costs[r] += spill_cost;
2875 min_cost = INT_MAX;
2876 best = -1;
2877 /* Now we choose hard register for A which results in highest
2878 allocation cost improvement. */
2879 for (j = 0; j < class_size; j++)
2881 hregno = ira_class_hard_regs[aclass][j];
2882 if (check_hard_reg_p (a, hregno,
2883 conflicting_regs, profitable_hard_regs)
2884 && min_cost > costs[hregno])
2886 best = hregno;
2887 min_cost = costs[hregno];
2890 if (min_cost >= 0)
2891 /* We are in a situation when assigning any hard register to A
2892 by spilling some conflicting allocnos does not improve the
2893 allocation cost. */
2894 continue;
2895 nregs = hard_regno_nregs[best][mode];
2896 /* Now spill conflicting allocnos which contain a hard register
2897 of A when we assign the best chosen hard register to it. */
2898 for (word = 0; word < nwords; word++)
2900 ira_object_t conflict_obj;
2901 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2902 ira_object_conflict_iterator oci;
2904 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2906 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2908 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2909 continue;
2910 conflict_nregs
2911 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2912 if (best + nregs <= conflict_hregno
2913 || conflict_hregno + conflict_nregs <= best)
2914 /* No intersection. */
2915 continue;
2916 ALLOCNO_HARD_REGNO (conflict_a) = -1;
2917 sorted_allocnos[n++] = conflict_a;
2918 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2919 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
2920 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
2921 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2924 /* Assign the best chosen hard register to A. */
2925 ALLOCNO_HARD_REGNO (a) = best;
2926 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2927 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
2928 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2930 if (n == 0)
2931 return;
2932 /* We spilled some allocnos to assign their hard registers to other
2933 allocnos. The spilled allocnos are now in array
2934 'sorted_allocnos'. There is still a possibility that some of the
2935 spilled allocnos can get hard registers. So let us try assign
2936 them hard registers again (just a reminder -- function
2937 'assign_hard_reg' assigns hard registers only if it is possible
2938 and profitable). We process the spilled allocnos with biggest
2939 benefit to get hard register first -- see function
2940 'allocno_cost_compare_func'. */
2941 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2942 allocno_cost_compare_func);
2943 for (j = 0; j < n; j++)
2945 a = sorted_allocnos[j];
2946 ALLOCNO_ASSIGNED_P (a) = false;
2947 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2949 fprintf (ira_dump_file, " ");
2950 ira_print_expanded_allocno (a);
2951 fprintf (ira_dump_file, " -- ");
2953 if (assign_hard_reg (a, false))
2955 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2956 fprintf (ira_dump_file, "assign hard reg %d\n",
2957 ALLOCNO_HARD_REGNO (a));
2959 else
2961 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2962 fprintf (ira_dump_file, "assign memory\n");
2967 /* Sort allocnos according to their priorities. */
2968 static int
2969 allocno_priority_compare_func (const void *v1p, const void *v2p)
2971 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2972 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2973 int pri1, pri2;
2975 /* Assign hard reg to static chain pointer pseudo first when
2976 non-local goto is used. */
2977 if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)))
2978 return 1;
2979 else if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)))
2980 return -1;
2981 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2982 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2983 if (pri2 != pri1)
2984 return SORTGT (pri2, pri1);
2986 /* If regs are equally good, sort by allocnos, so that the results of
2987 qsort leave nothing to chance. */
2988 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
   taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP.
   Depending on flag_ira_algorithm this either does simple
   priority-order assignment or full graph coloring via the
   bucket/stack machinery; both paths finish with
   improve_allocation.  */
static void
color_allocnos (void)
{
  unsigned int i, n;
  bitmap_iterator bi;
  ira_allocno_t a;

  setup_profitable_hard_regs ();
  /* First pass: drop register preferences that cannot be satisfied
     from the allocno's profitable hard registers.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int l, nr;
      HARD_REG_SET conflict_hard_regs;
      allocno_color_data_t data;
      ira_pref_t pref, next_pref;

      a = ira_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      /* Union of the conflict hard regs of every object of A.
	 Computed here but only the pref filtering below is done with
	 DATA; conflict_hard_regs itself is not used further in this
	 loop.  */
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      for (l = 0; l < nr; l++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, l);
	  IOR_HARD_REG_SET (conflict_hard_regs,
			    OBJECT_CONFLICT_HARD_REGS (obj));
	}
      data = ALLOCNO_COLOR_DATA (a);
      for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
	{
	  /* Fetch the link before a possible removal frees PREF.  */
	  next_pref = pref->next_pref;
	  if (! ira_hard_reg_in_set_p (pref->hard_regno,
				       ALLOCNO_MODE (a),
				       data->profitable_hard_regs))
	    ira_remove_pref (pref);
	}
    }
  if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
    {
      /* Priority coloring: spill NO_REGS allocnos immediately and
	 assign the rest in priority order.  */
      n = 0;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) == NO_REGS)
	    {
	      /* No usable class -- the allocno goes to memory.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, "      Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	      continue;
	    }
	  sorted_allocnos[n++] = a;
	}
      if (n != 0)
	{
	  setup_allocno_priorities (sorted_allocnos, n);
	  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
		 allocno_priority_compare_func);
	  for (i = 0; i < n; i++)
	    {
	      a = sorted_allocnos[i];
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, "      ");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "  -- ");
		}
	      if (assign_hard_reg (a, false))
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign hard reg %d\n",
			     ALLOCNO_HARD_REGNO (a));
		}
	      else
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign memory\n");
		}
	    }
	}
    }
  else
    {
      /* Chaitin-Briggs coloring: build the hard-regs forest, put
	 colorable allocnos into buckets, and run the classic
	 push/pop coloring.  */
      form_allocno_hard_regs_nodes_forest ();
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	print_hard_regs_forest (ira_dump_file);
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
	    {
	      ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
	      update_costs_from_prefs (a);
	    }
	  else
	    {
	      /* No profitable hard register at all -- spill now.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      /* We don't need updated costs anymore.  */
	      ira_free_allocno_updated_costs (a);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, "      Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	    }
	}
      /* Put the allocnos into the corresponding buckets.  */
      colorable_allocno_bucket = NULL;
      uncolorable_allocno_bucket = NULL;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
	    put_allocno_into_bucket (a);
	}
      push_allocnos_to_stack ();
      pop_allocnos_from_stack ();
      finish_allocno_hard_regs_nodes_forest ();
    }
  improve_allocation ();
}
/* Output information about the loop given by its LOOP_TREE_NODE to
   IRA_DUMP_FILE: the loop header line, member basic blocks with
   out-of-loop successor edges, the allocno sets, modified regnos and
   register pressure per pressure class.  Dump-only; no state is
   changed.  */
static void
print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
  unsigned int j;
  bitmap_iterator bi;
  ira_loop_tree_node_t subloop_node, dest_loop_node;
  edge e;
  edge_iterator ei;

  if (loop_tree_node->parent == NULL)
    /* The root node covers the whole function and has no cfg loop
       attached; print it with pseudo loop number 0.  */
    fprintf (ira_dump_file,
	     "\n  Loop 0 (parent -1, header bb%d, depth 0)\n    bbs:",
	     NUM_FIXED_BLOCKS);
  else
    {
      ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
      fprintf (ira_dump_file,
	       "\n  Loop %d (parent %d, header bb%d, depth %d)\n    bbs:",
	       loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
	       loop_tree_node->loop->header->index,
	       loop_depth (loop_tree_node->loop));
    }
  /* List the basic blocks directly contained in this loop node; for
     each, note successor edges leaving the loop ("(->bb:loop)").  */
  for (subloop_node = loop_tree_node->children;
       subloop_node != NULL;
       subloop_node = subloop_node->next)
    if (subloop_node->bb != NULL)
      {
	fprintf (ira_dump_file, " %d", subloop_node->bb->index);
	FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
	  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	      && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
		  != loop_tree_node))
	    fprintf (ira_dump_file, "(->%d:l%d)",
		     e->dest->index, dest_loop_node->loop_num);
      }
  fprintf (ira_dump_file, "\n    all:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n    modified regnos:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
    fprintf (ira_dump_file, " %d", j);
  fprintf (ira_dump_file, "\n    border:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n    Pressure:");
  for (j = 0; (int) j < ira_pressure_classes_num; j++)
    {
      enum reg_class pclass;

      pclass = ira_pressure_classes[j];
      /* Only print classes with non-zero pressure.  */
      if (loop_tree_node->reg_pressure[pclass] == 0)
	continue;
      fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
	       loop_tree_node->reg_pressure[pclass]);
    }
  fprintf (ira_dump_file, "\n");
}
/* Color the allocnos inside loop (in the extreme case it can be all
   of the function) given the corresponding LOOP_TREE_NODE.  The
   function is called for each loop during top-down traverse of the
   loop tree.  Phases: (1) set up per-allocno color data, (2) color
   the region's allocnos, (3) propagate assignments through caps,
   (4) adjust the cost fields of the corresponding allocnos in the
   subloops, (5) tear down the per-allocno color data.  */
static void
color_pass (ira_loop_tree_node_t loop_tree_node)
{
  int regno, hard_regno, index = -1, n;
  int cost, exit_freq, enter_freq;
  unsigned int j;
  bitmap_iterator bi;
  machine_mode mode;
  enum reg_class rclass, aclass, pclass;
  ira_allocno_t a, subloop_allocno;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_tree_node->bb == NULL);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    print_loop_title (loop_tree_node);

  bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
  bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
  /* Count region allocnos and drop already-assigned ones from the
     coloring set (they are still *considered* for conflicts).  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      n++;
      if (! ALLOCNO_ASSIGNED_P (a))
	continue;
      bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
    }
  /* One zero-initialized color-data record per considered allocno,
     attached via ALLOCNO_ADD_DATA for the duration of this pass.  */
  allocno_color_data
    = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
					   * n);
  memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
  curr_allocno_process = 0;
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
      n++;
    }
  init_allocno_threads ();
  /* Color all mentioned allocnos including transparent ones.  */
  color_allocnos ();
  /* Process caps.  They are processed just once.  */
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
      {
	a = ira_allocnos[j];
	if (ALLOCNO_CAP_MEMBER (a) == NULL)
	  continue;
	/* Remove from processing in the next loop.  */
	bitmap_clear_bit (consideration_allocno_bitmap, j);
	rclass = ALLOCNO_CLASS (a);
	pclass = ira_pressure_class_translate[rclass];
	/* In mixed mode, propagate the cap's assignment down only
	   when the subloop is not under register pressure for the
	   cap's pressure class.  */
	if (flag_ira_region == IRA_REGION_MIXED
	    && (loop_tree_node->reg_pressure[pclass]
		<= ira_class_hard_regs_num[pclass]))
	  {
	    mode = ALLOCNO_MODE (a);
	    hard_regno = ALLOCNO_HARD_REGNO (a);
	    if (hard_regno >= 0)
	      {
		index = ira_class_hard_reg_index[rclass][hard_regno];
		ira_assert (index >= 0);
	      }
	    regno = ALLOCNO_REGNO (a);
	    subloop_allocno = ALLOCNO_CAP_MEMBER (a);
	    subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
	    ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
	    /* Give the cap member the same hard register (or memory,
	       if hard_regno < 0) as the cap itself.  */
	    ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
	    ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
	    if (hard_regno >= 0)
	      update_costs_from_copies (subloop_allocno, true, true);
	    /* We don't need updated costs anymore.  */
	    ira_free_allocno_updated_costs (subloop_allocno);
	  }
      }
  /* Update costs of the corresponding allocnos (not caps) in the
     subloops.  */
  for (subloop_node = loop_tree_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
	{
	  a = ira_allocnos[j];
	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  pclass = ira_pressure_class_translate[rclass];
	  hard_regno = ALLOCNO_HARD_REGNO (a);
	  /* Use hard register class here.  ??? */
	  if (hard_regno >= 0)
	    {
	      index = ira_class_hard_reg_index[rclass][hard_regno];
	      ira_assert (index >= 0);
	    }
	  regno = ALLOCNO_REGNO (a);
	  /* ??? conflict costs */
	  subloop_allocno = subloop_node->regno_allocno_map[regno];
	  if (subloop_allocno == NULL
	      || ALLOCNO_CAP (subloop_allocno) != NULL)
	    continue;
	  ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
	  ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
				    ALLOCNO_NUM (subloop_allocno)));
	  /* Force the parent's decision onto the subloop allocno when
	     the subloop has no pressure problem, when the pseudo is
	     the PIC register, or for multi-register values.  */
	  if ((flag_ira_region == IRA_REGION_MIXED
	       && (loop_tree_node->reg_pressure[pclass]
		   <= ira_class_hard_regs_num[pclass]))
	      || (pic_offset_table_rtx != NULL
		  && regno == (int) REGNO (pic_offset_table_rtx))
	      /* Avoid overlapped multi-registers. Moves between them
		 might result in wrong code generation.  */
	      || (hard_regno >= 0
		  && ira_reg_class_max_nregs[pclass][mode] > 1))
	    {
	      if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
		{
		  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
		  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
		  if (hard_regno >= 0)
		    update_costs_from_copies (subloop_allocno, true, true);
		  /* We don't need updated costs anymore.  */
		  ira_free_allocno_updated_costs (subloop_allocno);
		}
	      continue;
	    }
	  exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	  enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	  ira_assert (regno < ira_reg_equiv_len);
	  if (ira_equiv_no_lvalue_p (regno))
	    {
	      /* The pseudo has an equivalence -- just propagate the
		 parent's decision instead of adjusting costs.  */
	      if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
		{
		  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
		  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
		  if (hard_regno >= 0)
		    update_costs_from_copies (subloop_allocno, true, true);
		  /* We don't need updated costs anymore.  */
		  ira_free_allocno_updated_costs (subloop_allocno);
		}
	    }
	  else if (hard_regno < 0)
	    {
	      /* Parent keeps the pseudo in memory: staying in memory
		 inside the subloop avoids memory moves on the loop
		 border, so make memory cheaper by the border move
		 cost.  */
	      ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
		-= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
		    + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
	    }
	  else
	    {
	      /* Parent got a hard register: bias the subloop allocno
		 toward the same register (cheaper hard-reg cost for
		 INDEX) and make memory more expensive by the border
		 spill/restore cost.  */
	      aclass = ALLOCNO_CLASS (subloop_allocno);
	      ira_init_register_move_cost_if_necessary (mode);
	      cost = (ira_register_move_cost[mode][rclass][rclass]
		      * (exit_freq + enter_freq));
	      ira_allocate_and_set_or_copy_costs
		(&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
		 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
		 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
	      ira_allocate_and_set_or_copy_costs
		(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
		 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
	      ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
	      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
		-= cost;
	      /* Keep the class cost the minimum over the per-register
		 costs.  */
	      if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
		  > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
		ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
		  = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
	      ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
		+= (ira_memory_move_cost[mode][rclass][0] * enter_freq
		    + ira_memory_move_cost[mode][rclass][1] * exit_freq);
	    }
	}
    }
  /* Detach and release the per-allocno color data.  */
  ira_free (allocno_color_data);
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = NULL;
    }
}
3368 /* Initialize the common data for coloring and calls functions to do
3369 Chaitin-Briggs and regional coloring. */
3370 static void
3371 do_coloring (void)
3373 coloring_allocno_bitmap = ira_allocate_bitmap ();
3374 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3375 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
3377 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
3379 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3380 ira_print_disposition (ira_dump_file);
3382 ira_free_bitmap (coloring_allocno_bitmap);
/* Move spill/restore code, which are to be generated in ira-emit.c,
   to less frequent points (if it is profitable) by reassigning some
   allocnos (in loop with subloops containing in another loop) to
   memory which results in longer live-range where the corresponding
   pseudo-registers will be in memory.  Iterates until no further
   allocno is moved to memory.  */
static void
move_spill_restore (void)
{
  int cost, regno, hard_regno, hard_regno2, index;
  bool changed_p;
  int enter_freq, exit_freq;
  machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t a, parent_allocno, subloop_allocno;
  ira_loop_tree_node_t parent, loop_node, subloop_node;
  ira_allocno_iterator ai;

  for (;;)
    {
      changed_p = false;
      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "New iteration of spill/restore move\n");
      FOR_EACH_ALLOCNO (a, ai)
	{
	  regno = ALLOCNO_REGNO (a);
	  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
	  /* Only consider non-cap allocnos that got a hard register,
	     live across the border of a loop that has subloops.  */
	  if (ALLOCNO_CAP_MEMBER (a) != NULL
	      || ALLOCNO_CAP (a) != NULL
	      || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
	      || loop_node->children == NULL
	      /* don't do the optimization because it can create
		 copies and the reload pass can spill the allocno set
		 by copy although the allocno will not get memory
		 slot.  */
	      || ira_equiv_no_lvalue_p (regno)
	      || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a))
	      /* Do not spill static chain pointer pseudo when
		 non-local goto is used.  */
	      || non_spilled_static_chain_regno_p (regno))
	    continue;
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  index = ira_class_hard_reg_index[rclass][hard_regno];
	  ira_assert (index >= 0);
	  /* COST accumulates the change of total cost if A were put
	     in memory instead of HARD_REGNO; negative means spilling
	     A here is a win.  */
	  cost = (ALLOCNO_MEMORY_COST (a)
		  - (ALLOCNO_HARD_REG_COSTS (a) == NULL
		     ? ALLOCNO_CLASS_COST (a)
		     : ALLOCNO_HARD_REG_COSTS (a)[index]));
	  ira_init_register_move_cost_if_necessary (mode);
	  for (subloop_node = loop_node->subloops;
	       subloop_node != NULL;
	       subloop_node = subloop_node->subloop_next)
	    {
	      ira_assert (subloop_node->bb == NULL);
	      subloop_allocno = subloop_node->regno_allocno_map[regno];
	      if (subloop_allocno == NULL)
		continue;
	      ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
	      /* We have accumulated cost.  To get the real cost of
		 allocno usage in the loop we should subtract costs of
		 the subloop allocnos.  */
	      cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
		       - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
			  ? ALLOCNO_CLASS_COST (subloop_allocno)
			  : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
	      exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
		/* Subloop allocno is in memory too -- spilling A
		   removes the loads/stores on the subloop border.  */
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  /* Subloop allocno is in a register -- spilling A
		     adds memory moves on the subloop border but
		     removes any register-to-register shuffle.  */
		  cost
		    += (ira_memory_move_cost[mode][rclass][0] * exit_freq
			+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  /* Same accounting for the border with the parent region.  */
	  if ((parent = loop_node->parent) != NULL
	      && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
	    {
	      ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
	      exit_freq = ira_loop_edge_freq (loop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (loop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  /* NOTE(review): the [0]/[1] move-cost indices are
		     swapped here relative to the subloop case above;
		     this reflects the opposite move direction across
		     the parent border -- confirm against
		     ira_memory_move_cost's definition.  */
		  cost
		    += (ira_memory_move_cost[mode][rclass][1] * exit_freq
			+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  if (cost < 0)
	    {
	      /* Spilling A is profitable -- put it in memory and
		 iterate again, since this can enable more moves.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf
		    (ira_dump_file,
		     "      Moving spill/restore for a%dr%d up from loop %d",
		     ALLOCNO_NUM (a), regno, loop_node->loop_num);
		  fprintf (ira_dump_file, " - profit %d\n", -cost);
		}
	      changed_p = true;
	    }
	}
      if (! changed_p)
	break;
    }
}
/* Update current hard reg costs and current conflict hard reg costs
   for allocno A.  It is done by processing its copies containing
   other allocnos already assigned.  Each such copy makes the
   assigned partner's hard register cheaper for A by the copy
   frequency times the register move cost.  */
static void
update_curr_costs (ira_allocno_t a)
{
  int i, hard_regno, cost;
  machine_mode mode;
  enum reg_class aclass, rclass;
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  /* Start from the static costs; any previous updated costs are
     discarded.  */
  ira_free_allocno_updated_costs (a);
  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  mode = ALLOCNO_MODE (a);
  ira_init_register_move_cost_if_necessary (mode);
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      /* A copy links two allocnos; find which side A is on and the
	 link to the next copy on A's list.  */
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      /* Only copies whose partner already has a hard register in an
	 intersecting class can bias A's costs.  */
      if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
	  || ! ALLOCNO_ASSIGNED_P (another_a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
	continue;
      rclass = REGNO_REG_CLASS (hard_regno);
      i = ira_class_hard_reg_index[aclass][hard_regno];
      if (i < 0)
	continue;
      /* Move cost direction depends on which side of the copy A is.  */
      cost = (cp->first == a
	      ? ira_register_move_cost[mode][rclass][aclass]
	      : ira_register_move_cost[mode][aclass][rclass]);
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
	 ALLOCNO_HARD_REG_COSTS (a));
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
	 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
      /* Make the partner's register cheaper for A, weighted by how
	 often the copy executes.  */
      ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
    }
}
/* Try to assign hard registers to the unassigned allocnos and
   allocnos conflicting with them or conflicting with allocnos whose
   regno >= START_REGNO.  The function is called after ira_flattening,
   so more allocnos (including ones created in ira-emit.c) will have a
   chance to get a hard register.  We use simple assignment algorithm
   based on priorities.  */
void
ira_reassign_conflict_allocnos (int start_regno)
{
  int i, allocnos_to_color_num;
  ira_allocno_t a;
  enum reg_class aclass;
  bitmap allocnos_to_color;
  ira_allocno_iterator ai;

  allocnos_to_color = ira_allocate_bitmap ();
  allocnos_to_color_num = 0;
  /* Phase 1: collect candidates into sorted_allocnos, using the
     bitmap to avoid duplicates.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      if (! ALLOCNO_ASSIGNED_P (a)
	  && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
	{
	  if (ALLOCNO_CLASS (a) != NO_REGS)
	    sorted_allocnos[allocnos_to_color_num++] = a;
	  else
	    {
	      /* No usable class: keep it in memory for good.  */
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	    }
	  bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
	}
      /* Additionally pull in allocnos conflicting with allocnos whose
	 regno is at least START_REGNO.  */
      if (ALLOCNO_REGNO (a) < start_regno
	  || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      ira_assert (ira_reg_classes_intersect_p
			  [aclass][ALLOCNO_CLASS (conflict_a)]);
	      /* bitmap_set_bit returns false if the bit was already
		 set, i.e. the conflict allocno was seen before.  */
	      if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
		continue;
	      sorted_allocnos[allocnos_to_color_num++] = conflict_a;
	    }
	}
    }
  ira_free_bitmap (allocnos_to_color);
  /* Phase 2: order candidates by priority.  */
  if (allocnos_to_color_num > 1)
    {
      setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
      qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
	     allocno_priority_compare_func);
    }
  /* Phase 3: mark all candidates unassigned and refresh their costs
     from copies before any of them is (re)colored.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      ALLOCNO_ASSIGNED_P (a) = false;
      update_curr_costs (a);
    }
  /* Phase 4: try to color in priority order.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      if (assign_hard_reg (a, true))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file,
	       "      Secondary allocation: assign hard reg %d to reg %d\n",
	       ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
	}
    }
}
3647 /* This page contains functions used to find conflicts using allocno
3648 live ranges. */
3650 #ifdef ENABLE_IRA_CHECKING
3652 /* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
3653 intersect. This should be used when there is only one region.
3654 Currently this is used during reload. */
3655 static bool
3656 conflict_by_live_ranges_p (int regno1, int regno2)
3658 ira_allocno_t a1, a2;
3660 ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
3661 && regno2 >= FIRST_PSEUDO_REGISTER);
3662 /* Reg info calculated by dataflow infrastructure can be different
3663 from one calculated by regclass. */
3664 if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
3665 || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
3666 return false;
3667 return allocnos_conflict_by_live_ranges_p (a1, a2);
3670 #endif
3674 /* This page contains code to coalesce memory stack slots used by
3675 spilled allocnos. This results in smaller stack frame, better data
3676 locality, and in smaller code for some architectures like
3677 x86/x86_64 where insn size depends on address displacement value.
3678 On the other hand, it can worsen insn scheduling after the RA but
3679 in practice it is less important than smaller stack frames. */
/* TRUE if we coalesced some allocnos.  In other words, if we got
   loops formed by members first_coalesced_allocno and
   next_coalesced_allocno containing more than one allocno.  */
3684 static bool allocno_coalesced_p;
3686 /* Bitmap used to prevent a repeated allocno processing because of
3687 coalescing. */
3688 static bitmap processed_coalesced_allocno_bitmap;
3690 /* See below. */
3691 typedef struct coalesce_data *coalesce_data_t;
/* To decrease footprint of ira_allocno structure we store all data
   needed only for coalescing in the following structure.  */
struct coalesce_data
{
  /* Coalesced allocnos form a cyclic list.  One allocno given by
     FIRST represents all coalesced allocnos.  The
     list is chained by NEXT.  */
  ira_allocno_t first;
  ira_allocno_t next;
  /* Scratch value; not referenced in this part of the file --
     presumably used as temporary working storage by other coalescing
     code (verify against the rest of ira-color.c).  */
  int temp;
};
3705 /* Container for storing allocno data concerning coalescing. */
3706 static coalesce_data_t allocno_coalesce_data;
3708 /* Macro to access the data concerning coalescing. */
3709 #define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging A2 set into A1
   set).  The two cyclic NEXT lists are spliced into one and every
   member of A2's set gets A1's representative as its FIRST.  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_COALESCE_DATA (a1)->first;
  a = ALLOCNO_COALESCE_DATA (a2)->first;
  /* Already in the same set -- nothing to do.  */
  if (first == a)
    return;
  /* Walk A2's cycle once, retargeting every member's FIRST; LAST
     ends up as the member just before A2 in the cycle.  */
  for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      ALLOCNO_COALESCE_DATA (a)->first = first;
      if (a == a2)
	break;
      last = a;
    }
  /* Splice A2's cycle into FIRST's cycle right after FIRST.  */
  next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
  allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
  allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
}
/* Return TRUE if there are conflicting allocnos from two sets of
   coalesced allocnos given correspondingly by allocnos A1 and A2.  We
   use live ranges to find conflicts because conflicts are represented
   only for allocnos of the same allocno class and during the reload
   pass we coalesce allocnos for sharing stack memory slots.  Every
   member of A2's set is checked against every member of A1's set.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  if (allocno_coalesced_p)
    {
      /* Record A1's set members in the shared bitmap.  (The bitmap
	 is not consulted below in this chunk; it appears to guard
	 repeated processing elsewhere -- see
	 processed_coalesced_allocno_bitmap.)  */
      bitmap_clear (processed_coalesced_allocno_bitmap);
      for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
	  if (a == a1)
	    break;
	}
    }
  /* Pairwise live-range test over the two cyclic member lists.  */
  for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
/* The major function for aggressive allocno coalescing.  We coalesce
   only spilled allocnos.  If some allocnos have been coalesced, we
   set up flag allocno_coalesced_p.  Candidate copies are collected,
   sorted by execution frequency, and merged greedily whenever the
   two sides' coalesced sets do not conflict.  */
static void
coalesce_allocnos (void)
{
  ira_allocno_t a;
  ira_copy_t cp, next_cp;
  unsigned int j;
  int i, n, cp_num, regno;
  bitmap_iterator bi;

  cp_num = 0;
  /* Collect copies.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      regno = ALLOCNO_REGNO (a);
      /* Only spilled allocnos without a no-lvalue equivalence are
	 coalescing candidates.  */
      if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
	  || ira_equiv_no_lvalue_p (regno))
	continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
	{
	  if (cp->first == a)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      regno = ALLOCNO_REGNO (cp->second);
	      /* For priority coloring we coalesce allocnos only with
		 the same allocno class not with intersected allocno
		 classes as it were possible.  It is done for
		 simplicity.  */
	      if ((cp->insn != NULL || cp->constraint_p)
		  && ALLOCNO_ASSIGNED_P (cp->second)
		  && ALLOCNO_HARD_REGNO (cp->second) < 0
		  && ! ira_equiv_no_lvalue_p (regno))
		sorted_copies[cp_num++] = cp;
	    }
	  else if (cp->second == a)
	    /* Each copy is visited from both sides; only record it
	       when A is the first side to avoid duplicates.  */
	    next_cp = cp->next_second_allocno_copy;
	  else
	    gcc_unreachable ();
	}
    }
  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Coalesced copies, most frequently executed first.  */
  for (; cp_num != 0;)
    {
      /* Find the first (most frequent) copy whose sides can be
	 merged without a live-range conflict, and merge them.  */
      for (i = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (! coalesced_allocno_conflict_p (cp->first, cp->second))
	    {
	      allocno_coalesced_p = true;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		fprintf
		  (ira_dump_file,
		   "      Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
		   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
		   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
		   cp->freq);
	      merge_allocnos (cp->first, cp->second);
	      i++;
	      break;
	    }
	}
      /* Collect the rest of copies.  Copies whose sides now belong
	 to the same coalesced set are dropped; the loop terminates
	 when no mergeable copy remains (then i reaches cp_num and
	 nothing is re-collected).  */
      for (n = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
	      != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
	    sorted_copies[n++] = cp;
	}
      cp_num = n;
    }
}
3851 /* Usage cost and order number of coalesced allocno set to which
3852 given pseudo register belongs to. */
3853 static int *regno_coalesced_allocno_cost;
3854 static int *regno_coalesced_allocno_num;
3856 /* Sort pseudos according frequencies of coalesced allocno sets they
3857 belong to (putting most frequently ones first), and according to
3858 coalesced allocno set order numbers. */
3859 static int
3860 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
3862 const int regno1 = *(const int *) v1p;
3863 const int regno2 = *(const int *) v2p;
3864 int diff;
3866 if ((diff = (regno_coalesced_allocno_cost[regno2]
3867 - regno_coalesced_allocno_cost[regno1])) != 0)
3868 return diff;
3869 if ((diff = (regno_coalesced_allocno_num[regno1]
3870 - regno_coalesced_allocno_num[regno2])) != 0)
3871 return diff;
3872 return regno1 - regno2;
3875 /* Widest width in which each pseudo reg is referred to (via subreg).
3876 It is used for sorting pseudo registers. */
3877 static unsigned int *regno_max_ref_width;
3879 /* Sort pseudos according their slot numbers (putting ones with
3880 smaller numbers first, or last when the frame pointer is not
3881 needed). */
3882 static int
3883 coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
3885 const int regno1 = *(const int *) v1p;
3886 const int regno2 = *(const int *) v2p;
3887 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
3888 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
3889 int diff, slot_num1, slot_num2;
3890 int total_size1, total_size2;
3892 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
3894 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
3895 return regno1 - regno2;
3896 return 1;
3898 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
3899 return -1;
3900 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
3901 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
3902 if ((diff = slot_num1 - slot_num2) != 0)
3903 return (frame_pointer_needed
3904 || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
3905 total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
3906 regno_max_ref_width[regno1]);
3907 total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
3908 regno_max_ref_width[regno2]);
3909 if ((diff = total_size2 - total_size1) != 0)
3910 return diff;
3911 return regno1 - regno2;
3914 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
3915 for coalesced allocno sets containing allocnos with their regnos
3916 given in array PSEUDO_REGNOS of length N. */
3917 static void
3918 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
3920 int i, num, regno, cost;
3921 ira_allocno_t allocno, a;
3923 for (num = i = 0; i < n; i++)
3925 regno = pseudo_regnos[i];
3926 allocno = ira_regno_allocno_map[regno];
3927 if (allocno == NULL)
3929 regno_coalesced_allocno_cost[regno] = 0;
3930 regno_coalesced_allocno_num[regno] = ++num;
3931 continue;
3933 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3934 continue;
3935 num++;
3936 for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3937 a = ALLOCNO_COALESCE_DATA (a)->next)
3939 cost += ALLOCNO_FREQ (a);
3940 if (a == allocno)
3941 break;
3943 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3944 a = ALLOCNO_COALESCE_DATA (a)->next)
3946 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
3947 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
3948 if (a == allocno)
3949 break;
3954 /* Collect spilled allocnos representing coalesced allocno sets (the
3955 first coalesced allocno). The collected allocnos are returned
3956 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3957 number of the collected allocnos. The allocnos are given by their
3958 regnos in array PSEUDO_REGNOS of length N. */
3959 static int
3960 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
3961 ira_allocno_t *spilled_coalesced_allocnos)
3963 int i, num, regno;
3964 ira_allocno_t allocno;
3966 for (num = i = 0; i < n; i++)
3968 regno = pseudo_regnos[i];
3969 allocno = ira_regno_allocno_map[regno];
3970 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
3971 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3972 continue;
3973 spilled_coalesced_allocnos[num++] = allocno;
3975 return num;
3978 /* Array of live ranges of size IRA_ALLOCNOS_NUM. Live range for
3979 given slot contains live ranges of coalesced allocnos assigned to
3980 given slot. */
3981 static live_range_t *slot_coalesced_allocnos_live_ranges;
3983 /* Return TRUE if coalesced allocnos represented by ALLOCNO has live
3984 ranges intersected with live ranges of coalesced allocnos assigned
3985 to slot with number N. */
3986 static bool
3987 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
3989 ira_allocno_t a;
3991 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3992 a = ALLOCNO_COALESCE_DATA (a)->next)
3994 int i;
3995 int nr = ALLOCNO_NUM_OBJECTS (a);
3997 for (i = 0; i < nr; i++)
3999 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4001 if (ira_live_ranges_intersect_p
4002 (slot_coalesced_allocnos_live_ranges[n],
4003 OBJECT_LIVE_RANGES (obj)))
4004 return true;
4006 if (a == allocno)
4007 break;
4009 return false;
4012 /* Update live ranges of slot to which coalesced allocnos represented
4013 by ALLOCNO were assigned. */
4014 static void
4015 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
4017 int i, n;
4018 ira_allocno_t a;
4019 live_range_t r;
4021 n = ALLOCNO_COALESCE_DATA (allocno)->temp;
4022 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4023 a = ALLOCNO_COALESCE_DATA (a)->next)
4025 int nr = ALLOCNO_NUM_OBJECTS (a);
4026 for (i = 0; i < nr; i++)
4028 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4030 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
4031 slot_coalesced_allocnos_live_ranges[n]
4032 = ira_merge_live_ranges
4033 (slot_coalesced_allocnos_live_ranges[n], r);
4035 if (a == allocno)
4036 break;
4040 /* We have coalesced allocnos involving in copies. Coalesce allocnos
4041 further in order to share the same memory stack slot. Allocnos
4042 representing sets of allocnos coalesced before the call are given
4043 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
4044 some allocnos were coalesced in the function. */
4045 static bool
4046 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
4048 int i, j, n, last_coalesced_allocno_num;
4049 ira_allocno_t allocno, a;
4050 bool merged_p = false;
4051 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
4053 slot_coalesced_allocnos_live_ranges
4054 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
4055 memset (slot_coalesced_allocnos_live_ranges, 0,
4056 sizeof (live_range_t) * ira_allocnos_num);
4057 last_coalesced_allocno_num = 0;
4058 /* Coalesce non-conflicting spilled allocnos preferring most
4059 frequently used. */
4060 for (i = 0; i < num; i++)
4062 allocno = spilled_coalesced_allocnos[i];
4063 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4064 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
4065 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4066 continue;
4067 for (j = 0; j < i; j++)
4069 a = spilled_coalesced_allocnos[j];
4070 n = ALLOCNO_COALESCE_DATA (a)->temp;
4071 if (ALLOCNO_COALESCE_DATA (a)->first == a
4072 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
4073 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
4074 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
4075 break;
4077 if (j >= i)
4079 /* No coalescing: set up number for coalesced allocnos
4080 represented by ALLOCNO. */
4081 ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
4082 setup_slot_coalesced_allocno_live_ranges (allocno);
4084 else
4086 allocno_coalesced_p = true;
4087 merged_p = true;
4088 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4089 fprintf (ira_dump_file,
4090 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4091 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
4092 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
4093 ALLOCNO_COALESCE_DATA (allocno)->temp
4094 = ALLOCNO_COALESCE_DATA (a)->temp;
4095 setup_slot_coalesced_allocno_live_ranges (allocno);
4096 merge_allocnos (a, allocno);
4097 ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
4100 for (i = 0; i < ira_allocnos_num; i++)
4101 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
4102 ira_free (slot_coalesced_allocnos_live_ranges);
4103 return merged_p;
4106 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
4107 subsequent assigning stack slots to them in the reload pass. To do
4108 this we coalesce spilled allocnos first to decrease the number of
4109 memory-memory move insns. This function is called by the
4110 reload. */
4111 void
4112 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
4113 unsigned int *reg_max_ref_width)
4115 int max_regno = max_reg_num ();
4116 int i, regno, num, slot_num;
4117 ira_allocno_t allocno, a;
4118 ira_allocno_iterator ai;
4119 ira_allocno_t *spilled_coalesced_allocnos;
4121 ira_assert (! ira_use_lra_p);
4123 /* Set up allocnos can be coalesced. */
4124 coloring_allocno_bitmap = ira_allocate_bitmap ();
4125 for (i = 0; i < n; i++)
4127 regno = pseudo_regnos[i];
4128 allocno = ira_regno_allocno_map[regno];
4129 if (allocno != NULL)
4130 bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
4132 allocno_coalesced_p = false;
4133 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
4134 allocno_coalesce_data
4135 = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
4136 * ira_allocnos_num);
4137 /* Initialize coalesce data for allocnos. */
4138 FOR_EACH_ALLOCNO (a, ai)
4140 ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
4141 ALLOCNO_COALESCE_DATA (a)->first = a;
4142 ALLOCNO_COALESCE_DATA (a)->next = a;
4144 coalesce_allocnos ();
4145 ira_free_bitmap (coloring_allocno_bitmap);
4146 regno_coalesced_allocno_cost
4147 = (int *) ira_allocate (max_regno * sizeof (int));
4148 regno_coalesced_allocno_num
4149 = (int *) ira_allocate (max_regno * sizeof (int));
4150 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
4151 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4152 /* Sort regnos according frequencies of the corresponding coalesced
4153 allocno sets. */
4154 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
4155 spilled_coalesced_allocnos
4156 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
4157 * sizeof (ira_allocno_t));
4158 /* Collect allocnos representing the spilled coalesced allocno
4159 sets. */
4160 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4161 spilled_coalesced_allocnos);
4162 if (flag_ira_share_spill_slots
4163 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
4165 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4166 qsort (pseudo_regnos, n, sizeof (int),
4167 coalesced_pseudo_reg_freq_compare);
4168 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4169 spilled_coalesced_allocnos);
4171 ira_free_bitmap (processed_coalesced_allocno_bitmap);
4172 allocno_coalesced_p = false;
4173 /* Assign stack slot numbers to spilled allocno sets, use smaller
4174 numbers for most frequently used coalesced allocnos. -1 is
4175 reserved for dynamic search of stack slots for pseudos spilled by
4176 the reload. */
4177 slot_num = 1;
4178 for (i = 0; i < num; i++)
4180 allocno = spilled_coalesced_allocnos[i];
4181 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4182 || ALLOCNO_HARD_REGNO (allocno) >= 0
4183 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4184 continue;
4185 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4186 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
4187 slot_num++;
4188 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4189 a = ALLOCNO_COALESCE_DATA (a)->next)
4191 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
4192 ALLOCNO_HARD_REGNO (a) = -slot_num;
4193 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4194 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
4195 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
4196 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
4197 reg_max_ref_width[ALLOCNO_REGNO (a)]));
4199 if (a == allocno)
4200 break;
4202 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4203 fprintf (ira_dump_file, "\n");
4205 ira_spilled_reg_stack_slots_num = slot_num - 1;
4206 ira_free (spilled_coalesced_allocnos);
4207 /* Sort regnos according the slot numbers. */
4208 regno_max_ref_width = reg_max_ref_width;
4209 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
4210 FOR_EACH_ALLOCNO (a, ai)
4211 ALLOCNO_ADD_DATA (a) = NULL;
4212 ira_free (allocno_coalesce_data);
4213 ira_free (regno_coalesced_allocno_num);
4214 ira_free (regno_coalesced_allocno_cost);
4219 /* This page contains code used by the reload pass to improve the
4220 final code. */
4222 /* The function is called from reload to mark changes in the
4223 allocation of REGNO made by the reload. Remember that reg_renumber
4224 reflects the change result. */
4225 void
4226 ira_mark_allocation_change (int regno)
4228 ira_allocno_t a = ira_regno_allocno_map[regno];
4229 int old_hard_regno, hard_regno, cost;
4230 enum reg_class aclass = ALLOCNO_CLASS (a);
4232 ira_assert (a != NULL);
4233 hard_regno = reg_renumber[regno];
4234 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
4235 return;
4236 if (old_hard_regno < 0)
4237 cost = -ALLOCNO_MEMORY_COST (a);
4238 else
4240 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
4241 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
4242 ? ALLOCNO_CLASS_COST (a)
4243 : ALLOCNO_HARD_REG_COSTS (a)
4244 [ira_class_hard_reg_index[aclass][old_hard_regno]]);
4245 update_costs_from_copies (a, false, false);
4247 ira_overall_cost -= cost;
4248 ALLOCNO_HARD_REGNO (a) = hard_regno;
4249 if (hard_regno < 0)
4251 ALLOCNO_HARD_REGNO (a) = -1;
4252 cost += ALLOCNO_MEMORY_COST (a);
4254 else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
4256 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
4257 ? ALLOCNO_CLASS_COST (a)
4258 : ALLOCNO_HARD_REG_COSTS (a)
4259 [ira_class_hard_reg_index[aclass][hard_regno]]);
4260 update_costs_from_copies (a, true, false);
4262 else
4263 /* Reload changed class of the allocno. */
4264 cost = 0;
4265 ira_overall_cost += cost;
4268 /* This function is called when reload deletes memory-memory move. In
4269 this case we marks that the allocation of the corresponding
4270 allocnos should be not changed in future. Otherwise we risk to get
4271 a wrong code. */
4272 void
4273 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4275 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4276 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4278 ira_assert (dst != NULL && src != NULL
4279 && ALLOCNO_HARD_REGNO (dst) < 0
4280 && ALLOCNO_HARD_REGNO (src) < 0);
4281 ALLOCNO_DONT_REASSIGN_P (dst) = true;
4282 ALLOCNO_DONT_REASSIGN_P (src) = true;
4285 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
4286 allocno A and return TRUE in the case of success. */
4287 static bool
4288 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
4290 int hard_regno;
4291 enum reg_class aclass;
4292 int regno = ALLOCNO_REGNO (a);
4293 HARD_REG_SET saved[2];
4294 int i, n;
4296 n = ALLOCNO_NUM_OBJECTS (a);
4297 for (i = 0; i < n; i++)
4299 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4300 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
4301 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
4302 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
4303 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
4304 call_used_reg_set);
4306 ALLOCNO_ASSIGNED_P (a) = false;
4307 aclass = ALLOCNO_CLASS (a);
4308 update_curr_costs (a);
4309 assign_hard_reg (a, true);
4310 hard_regno = ALLOCNO_HARD_REGNO (a);
4311 reg_renumber[regno] = hard_regno;
4312 if (hard_regno < 0)
4313 ALLOCNO_HARD_REGNO (a) = -1;
4314 else
4316 ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
4317 ira_overall_cost
4318 -= (ALLOCNO_MEMORY_COST (a)
4319 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
4320 ? ALLOCNO_CLASS_COST (a)
4321 : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
4322 [aclass][hard_regno]]));
4323 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
4324 && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
4325 call_used_reg_set))
4327 ira_assert (flag_caller_saves);
4328 caller_save_needed = 1;
4332 /* If we found a hard register, modify the RTL for the pseudo
4333 register to show the hard register, and mark the pseudo register
4334 live. */
4335 if (reg_renumber[regno] >= 0)
4337 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4338 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
4339 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
4340 mark_home_live (regno);
4342 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4343 fprintf (ira_dump_file, "\n");
4344 for (i = 0; i < n; i++)
4346 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4347 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
4349 return reg_renumber[regno] >= 0;
/* qsort comparator: sort pseudos by decreasing usage frequency,
   breaking ties by regno so the order is deterministic.  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  int regno1 = *(const int *) v1p;
  int regno2 = *(const int *) v2p;
  int freq_diff = REG_FREQ (regno2) - REG_FREQ (regno1);

  if (freq_diff != 0)
    return freq_diff;
  return regno1 - regno2;
}
4366 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
4367 NUM of them) or spilled pseudos conflicting with pseudos in
4368 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
4369 allocation has been changed. The function doesn't use
4370 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
4371 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
4372 is called by the reload pass at the end of each reload
4373 iteration. */
4374 bool
4375 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
4376 HARD_REG_SET bad_spill_regs,
4377 HARD_REG_SET *pseudo_forbidden_regs,
4378 HARD_REG_SET *pseudo_previous_regs,
4379 bitmap spilled)
4381 int i, n, regno;
4382 bool changed_p;
4383 ira_allocno_t a;
4384 HARD_REG_SET forbidden_regs;
4385 bitmap temp = BITMAP_ALLOC (NULL);
4387 /* Add pseudos which conflict with pseudos already in
4388 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
4389 to allocating in two steps as some of the conflicts might have
4390 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
4391 for (i = 0; i < num; i++)
4392 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
4394 for (i = 0, n = num; i < n; i++)
4396 int nr, j;
4397 int regno = spilled_pseudo_regs[i];
4398 bitmap_set_bit (temp, regno);
4400 a = ira_regno_allocno_map[regno];
4401 nr = ALLOCNO_NUM_OBJECTS (a);
4402 for (j = 0; j < nr; j++)
4404 ira_object_t conflict_obj;
4405 ira_object_t obj = ALLOCNO_OBJECT (a, j);
4406 ira_object_conflict_iterator oci;
4408 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
4410 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
4411 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
4412 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
4413 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
4415 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
4416 /* ?!? This seems wrong. */
4417 bitmap_set_bit (consideration_allocno_bitmap,
4418 ALLOCNO_NUM (conflict_a));
4424 if (num > 1)
4425 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
4426 changed_p = false;
4427 /* Try to assign hard registers to pseudos from
4428 SPILLED_PSEUDO_REGS. */
4429 for (i = 0; i < num; i++)
4431 regno = spilled_pseudo_regs[i];
4432 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
4433 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
4434 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
4435 gcc_assert (reg_renumber[regno] < 0);
4436 a = ira_regno_allocno_map[regno];
4437 ira_mark_allocation_change (regno);
4438 ira_assert (reg_renumber[regno] < 0);
4439 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4440 fprintf (ira_dump_file,
4441 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
4442 ALLOCNO_MEMORY_COST (a)
4443 - ALLOCNO_CLASS_COST (a));
4444 allocno_reload_assign (a, forbidden_regs);
4445 if (reg_renumber[regno] >= 0)
4447 CLEAR_REGNO_REG_SET (spilled, regno);
4448 changed_p = true;
4451 BITMAP_FREE (temp);
4452 return changed_p;
4455 /* The function is called by reload and returns already allocated
4456 stack slot (if any) for REGNO with given INHERENT_SIZE and
4457 TOTAL_SIZE. In the case of failure to find a slot which can be
4458 used for REGNO, the function returns NULL. */
4460 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
4461 unsigned int total_size)
4463 unsigned int i;
4464 int slot_num, best_slot_num;
4465 int cost, best_cost;
4466 ira_copy_t cp, next_cp;
4467 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
4468 rtx x;
4469 bitmap_iterator bi;
4470 struct ira_spilled_reg_stack_slot *slot = NULL;
4472 ira_assert (! ira_use_lra_p);
4474 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
4475 && inherent_size <= total_size
4476 && ALLOCNO_HARD_REGNO (allocno) < 0);
4477 if (! flag_ira_share_spill_slots)
4478 return NULL_RTX;
4479 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4480 if (slot_num != -1)
4482 slot = &ira_spilled_reg_stack_slots[slot_num];
4483 x = slot->mem;
4485 else
4487 best_cost = best_slot_num = -1;
4488 x = NULL_RTX;
4489 /* It means that the pseudo was spilled in the reload pass, try
4490 to reuse a slot. */
4491 for (slot_num = 0;
4492 slot_num < ira_spilled_reg_stack_slots_num;
4493 slot_num++)
4495 slot = &ira_spilled_reg_stack_slots[slot_num];
4496 if (slot->mem == NULL_RTX)
4497 continue;
4498 if (slot->width < total_size
4499 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
4500 continue;
4502 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4503 FIRST_PSEUDO_REGISTER, i, bi)
4505 another_allocno = ira_regno_allocno_map[i];
4506 if (allocnos_conflict_by_live_ranges_p (allocno,
4507 another_allocno))
4508 goto cont;
4510 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
4511 cp != NULL;
4512 cp = next_cp)
4514 if (cp->first == allocno)
4516 next_cp = cp->next_first_allocno_copy;
4517 another_allocno = cp->second;
4519 else if (cp->second == allocno)
4521 next_cp = cp->next_second_allocno_copy;
4522 another_allocno = cp->first;
4524 else
4525 gcc_unreachable ();
4526 if (cp->insn == NULL_RTX)
4527 continue;
4528 if (bitmap_bit_p (&slot->spilled_regs,
4529 ALLOCNO_REGNO (another_allocno)))
4530 cost += cp->freq;
4532 if (cost > best_cost)
4534 best_cost = cost;
4535 best_slot_num = slot_num;
4537 cont:
4540 if (best_cost >= 0)
4542 slot_num = best_slot_num;
4543 slot = &ira_spilled_reg_stack_slots[slot_num];
4544 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4545 x = slot->mem;
4546 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4549 if (x != NULL_RTX)
4551 ira_assert (slot->width >= total_size);
4552 #ifdef ENABLE_IRA_CHECKING
4553 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4554 FIRST_PSEUDO_REGISTER, i, bi)
4556 ira_assert (! conflict_by_live_ranges_p (regno, i));
4558 #endif
4559 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4560 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4562 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
4563 regno, REG_FREQ (regno), slot_num);
4564 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4565 FIRST_PSEUDO_REGISTER, i, bi)
4567 if ((unsigned) regno != i)
4568 fprintf (ira_dump_file, " %d", i);
4570 fprintf (ira_dump_file, "\n");
4573 return x;
4576 /* This is called by reload every time a new stack slot X with
4577 TOTAL_SIZE was allocated for REGNO. We store this info for
4578 subsequent ira_reuse_stack_slot calls. */
4579 void
4580 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
4582 struct ira_spilled_reg_stack_slot *slot;
4583 int slot_num;
4584 ira_allocno_t allocno;
4586 ira_assert (! ira_use_lra_p);
4588 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
4589 allocno = ira_regno_allocno_map[regno];
4590 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4591 if (slot_num == -1)
4593 slot_num = ira_spilled_reg_stack_slots_num++;
4594 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4596 slot = &ira_spilled_reg_stack_slots[slot_num];
4597 INIT_REG_SET (&slot->spilled_regs);
4598 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4599 slot->mem = x;
4600 slot->width = total_size;
4601 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4602 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
4603 regno, REG_FREQ (regno), slot_num);
4607 /* Return spill cost for pseudo-registers whose numbers are in array
4608 REGNOS (with a negative number as an end marker) for reload with
4609 given IN and OUT for INSN. Return also number points (through
4610 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4611 the register pressure is high, number of references of the
4612 pseudo-registers (through NREFS), number of callee-clobbered
4613 hard-registers occupied by the pseudo-registers (through
4614 CALL_USED_COUNT), and the first hard regno occupied by the
4615 pseudo-registers (through FIRST_HARD_REGNO). */
4616 static int
4617 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx_insn *insn,
4618 int *excess_pressure_live_length,
4619 int *nrefs, int *call_used_count, int *first_hard_regno)
4621 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
4622 bool in_p, out_p;
4623 int length;
4624 ira_allocno_t a;
4626 *nrefs = 0;
4627 for (length = count = cost = i = 0;; i++)
4629 regno = regnos[i];
4630 if (regno < 0)
4631 break;
4632 *nrefs += REG_N_REFS (regno);
4633 hard_regno = reg_renumber[regno];
4634 ira_assert (hard_regno >= 0);
4635 a = ira_regno_allocno_map[regno];
4636 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
4637 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
4638 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
4639 for (j = 0; j < nregs; j++)
4640 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
4641 break;
4642 if (j == nregs)
4643 count++;
4644 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
4645 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
4646 if ((in_p || out_p)
4647 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
4649 saved_cost = 0;
4650 if (in_p)
4651 saved_cost += ira_memory_move_cost
4652 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
4653 if (out_p)
4654 saved_cost
4655 += ira_memory_move_cost
4656 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
4657 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
4660 *excess_pressure_live_length = length;
4661 *call_used_count = count;
4662 hard_regno = -1;
4663 if (regnos[0] >= 0)
4665 hard_regno = reg_renumber[regnos[0]];
4667 *first_hard_regno = hard_regno;
4668 return cost;
4671 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4672 REGNOS is better than spilling pseudo-registers with numbers in
4673 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4674 function used by the reload pass to make better register spilling
4675 decisions. */
4676 bool
4677 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
4678 rtx in, rtx out, rtx_insn *insn)
4680 int cost, other_cost;
4681 int length, other_length;
4682 int nrefs, other_nrefs;
4683 int call_used_count, other_call_used_count;
4684 int hard_regno, other_hard_regno;
4686 cost = calculate_spill_cost (regnos, in, out, insn,
4687 &length, &nrefs, &call_used_count, &hard_regno);
4688 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
4689 &other_length, &other_nrefs,
4690 &other_call_used_count,
4691 &other_hard_regno);
4692 if (nrefs == 0 && other_nrefs != 0)
4693 return true;
4694 if (nrefs != 0 && other_nrefs == 0)
4695 return false;
4696 if (cost != other_cost)
4697 return cost < other_cost;
4698 if (length != other_length)
4699 return length > other_length;
4700 #ifdef REG_ALLOC_ORDER
4701 if (hard_regno >= 0 && other_hard_regno >= 0)
4702 return (inv_reg_alloc_order[hard_regno]
4703 < inv_reg_alloc_order[other_hard_regno]);
4704 #else
4705 if (call_used_count != other_call_used_count)
4706 return call_used_count > other_call_used_count;
4707 #endif
4708 return false;
4713 /* Allocate and initialize data necessary for assign_hard_reg. */
4714 void
4715 ira_initiate_assign (void)
4717 sorted_allocnos
4718 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4719 * ira_allocnos_num);
4720 consideration_allocno_bitmap = ira_allocate_bitmap ();
4721 initiate_cost_update ();
4722 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4723 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
4724 * sizeof (ira_copy_t));
4727 /* Deallocate data used by assign_hard_reg. */
4728 void
4729 ira_finish_assign (void)
4731 ira_free (sorted_allocnos);
4732 ira_free_bitmap (consideration_allocno_bitmap);
4733 finish_cost_update ();
4734 ira_free (allocno_priorities);
4735 ira_free (sorted_copies);
4740 /* Entry function doing color-based register allocation. */
4741 static void
4742 color (void)
4744 allocno_stack_vec.create (ira_allocnos_num);
4745 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
4746 ira_initiate_assign ();
4747 do_coloring ();
4748 ira_finish_assign ();
4749 allocno_stack_vec.release ();
4750 move_spill_restore ();
4755 /* This page contains a simple register allocator without usage of
4756 allocno conflicts. This is used for fast allocation for -O0. */
4758 /* Do register allocation by not using allocno conflicts. It uses
4759 only allocno live ranges. The algorithm is close to Chow's
4760 priority coloring. */
4761 static void
4762 fast_allocation (void)
4764 int i, j, k, num, class_size, hard_regno;
4765 #ifdef STACK_REGS
4766 bool no_stack_reg_p;
4767 #endif
4768 enum reg_class aclass;
4769 machine_mode mode;
4770 ira_allocno_t a;
4771 ira_allocno_iterator ai;
4772 live_range_t r;
4773 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
4775 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4776 * ira_allocnos_num);
4777 num = 0;
4778 FOR_EACH_ALLOCNO (a, ai)
4779 sorted_allocnos[num++] = a;
4780 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4781 setup_allocno_priorities (sorted_allocnos, num);
4782 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
4783 * ira_max_point);
4784 for (i = 0; i < ira_max_point; i++)
4785 CLEAR_HARD_REG_SET (used_hard_regs[i]);
4786 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
4787 allocno_priority_compare_func);
4788 for (i = 0; i < num; i++)
4790 int nr, l;
4792 a = sorted_allocnos[i];
4793 nr = ALLOCNO_NUM_OBJECTS (a);
4794 CLEAR_HARD_REG_SET (conflict_hard_regs);
4795 for (l = 0; l < nr; l++)
4797 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4798 IOR_HARD_REG_SET (conflict_hard_regs,
4799 OBJECT_CONFLICT_HARD_REGS (obj));
4800 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4801 for (j = r->start; j <= r->finish; j++)
4802 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
4804 aclass = ALLOCNO_CLASS (a);
4805 ALLOCNO_ASSIGNED_P (a) = true;
4806 ALLOCNO_HARD_REGNO (a) = -1;
4807 if (hard_reg_set_subset_p (reg_class_contents[aclass],
4808 conflict_hard_regs))
4809 continue;
4810 mode = ALLOCNO_MODE (a);
4811 #ifdef STACK_REGS
4812 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
4813 #endif
4814 class_size = ira_class_hard_regs_num[aclass];
4815 for (j = 0; j < class_size; j++)
4817 hard_regno = ira_class_hard_regs[aclass][j];
4818 #ifdef STACK_REGS
4819 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
4820 && hard_regno <= LAST_STACK_REG)
4821 continue;
4822 #endif
4823 if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
4824 || (TEST_HARD_REG_BIT
4825 (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
4826 continue;
4827 ALLOCNO_HARD_REGNO (a) = hard_regno;
4828 for (l = 0; l < nr; l++)
4830 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4831 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4832 for (k = r->start; k <= r->finish; k++)
4833 IOR_HARD_REG_SET (used_hard_regs[k],
4834 ira_reg_mode_hard_regset[hard_regno][mode]);
4836 break;
4839 ira_free (sorted_allocnos);
4840 ira_free (used_hard_regs);
4841 ira_free (allocno_priorities);
4842 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
4843 ira_print_disposition (ira_dump_file);
4848 /* Entry function doing coloring. */
4849 void
4850 ira_color (void)
4852 ira_allocno_t a;
4853 ira_allocno_iterator ai;
4855 /* Setup updated costs. */
4856 FOR_EACH_ALLOCNO (a, ai)
4858 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
4859 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
4861 if (ira_conflicts_p)
4862 color ();
4863 else
4864 fast_allocation ();