/* (scraped gitweb page header, preserved as a comment so the file remains valid C:)
   [AArch64][14/14] Reuse target_option_current_node when passing pragma string to targe...
   [official-gcc.git] / gcc / ira-color.c
   blob 74d2c2ed6081c42c6dd8e56bbd121abbf7f26099  */
/* IRA allocation based on graph coloring.
   Copyright (C) 2006-2015 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "predict.h"
26 #include "tree.h"
27 #include "rtl.h"
28 #include "df.h"
29 #include "tm_p.h"
30 #include "target.h"
31 #include "regs.h"
32 #include "flags.h"
33 #include "alias.h"
34 #include "insn-config.h"
35 #include "expmed.h"
36 #include "dojump.h"
37 #include "explow.h"
38 #include "calls.h"
39 #include "emit-rtl.h"
40 #include "varasm.h"
41 #include "stmt.h"
42 #include "expr.h"
43 #include "diagnostic-core.h"
44 #include "reload.h"
45 #include "params.h"
46 #include "cfgloop.h"
47 #include "ira.h"
48 #include "alloc-pool.h"
49 #include "ira-int.h"
51 typedef struct allocno_hard_regs *allocno_hard_regs_t;
53 /* The structure contains information about hard registers can be
54 assigned to allocnos. Usually it is allocno profitable hard
55 registers but in some cases this set can be a bit different. Major
56 reason of the difference is a requirement to use hard register sets
57 that form a tree or a forest (set of trees), i.e. hard register set
58 of a node should contain hard register sets of its subnodes. */
59 struct allocno_hard_regs
61 /* Hard registers can be assigned to an allocno. */
62 HARD_REG_SET set;
63 /* Overall (spilling) cost of all allocnos with given register
64 set. */
65 int64_t cost;
68 typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
70 /* A node representing allocno hard registers. Such nodes form a
71 forest (set of trees). Each subnode of given node in the forest
72 refers for hard register set (usually allocno profitable hard
73 register set) which is a subset of one referred from given
74 node. */
75 struct allocno_hard_regs_node
77 /* Set up number of the node in preorder traversing of the forest. */
78 int preorder_num;
79 /* Used for different calculation like finding conflict size of an
80 allocno. */
81 int check;
82 /* Used for calculation of conflict size of an allocno. The
83 conflict size of the allocno is maximal number of given allocno
84 hard registers needed for allocation of the conflicting allocnos.
85 Given allocno is trivially colored if this number plus the number
86 of hard registers needed for given allocno is not greater than
87 the number of given allocno hard register set. */
88 int conflict_size;
89 /* The number of hard registers given by member hard_regs. */
90 int hard_regs_num;
91 /* The following member is used to form the final forest. */
92 bool used_p;
93 /* Pointer to the corresponding profitable hard registers. */
94 allocno_hard_regs_t hard_regs;
95 /* Parent, first subnode, previous and next node with the same
96 parent in the forest. */
97 allocno_hard_regs_node_t parent, first, prev, next;
/* Info about changing hard reg costs of an allocno.  */
struct update_cost_record
{
  /* Hard regno for which we changed the cost.  */
  int hard_regno;
  /* Divisor used when we changed the cost of HARD_REGNO.  */
  int divisor;
  /* Next record for the given allocno.  */
  struct update_cost_record *next;
};
111 /* To decrease footprint of ira_allocno structure we store all data
112 needed only for coloring in the following structure. */
113 struct allocno_color_data
115 /* TRUE value means that the allocno was not removed yet from the
116 conflicting graph during coloring. */
117 unsigned int in_graph_p : 1;
118 /* TRUE if it is put on the stack to make other allocnos
119 colorable. */
120 unsigned int may_be_spilled_p : 1;
121 /* TRUE if the allocno is trivially colorable. */
122 unsigned int colorable_p : 1;
123 /* Number of hard registers of the allocno class really
124 available for the allocno allocation. It is number of the
125 profitable hard regs. */
126 int available_regs_num;
127 /* Allocnos in a bucket (used in coloring) chained by the following
128 two members. */
129 ira_allocno_t next_bucket_allocno;
130 ira_allocno_t prev_bucket_allocno;
131 /* Used for temporary purposes. */
132 int temp;
133 /* Used to exclude repeated processing. */
134 int last_process;
135 /* Profitable hard regs available for this pseudo allocation. It
136 means that the set excludes unavailable hard regs and hard regs
137 conflicting with given pseudo. They should be of the allocno
138 class. */
139 HARD_REG_SET profitable_hard_regs;
140 /* The allocno hard registers node. */
141 allocno_hard_regs_node_t hard_regs_node;
142 /* Array of structures allocno_hard_regs_subnode representing
143 given allocno hard registers node (the 1st element in the array)
144 and all its subnodes in the tree (forest) of allocno hard
145 register nodes (see comments above). */
146 int hard_regs_subnodes_start;
147 /* The length of the previous array. */
148 int hard_regs_subnodes_num;
149 /* Records about updating allocno hard reg costs from copies. If
150 the allocno did not get expected hard register, these records are
151 used to restore original hard reg costs of allocnos connected to
152 this allocno by copies. */
153 struct update_cost_record *update_cost_records;
154 /* Threads. We collect allocnos connected by copies into threads
155 and try to assign hard regs to allocnos by threads. */
156 /* Allocno representing all thread. */
157 ira_allocno_t first_thread_allocno;
158 /* Allocnos in thread forms a cycle list through the following
159 member. */
160 ira_allocno_t next_thread_allocno;
161 /* All thread frequency. Defined only for first thread allocno. */
162 int thread_freq;
165 /* See above. */
166 typedef struct allocno_color_data *allocno_color_data_t;
168 /* Container for storing allocno data concerning coloring. */
169 static allocno_color_data_t allocno_color_data;
171 /* Macro to access the data concerning coloring. */
172 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
174 /* Used for finding allocno colorability to exclude repeated allocno
175 processing and for updating preferencing to exclude repeated
176 allocno processing during assignment. */
177 static int curr_allocno_process;
179 /* This file contains code for regional graph coloring, spill/restore
180 code placement optimization, and code helping the reload pass to do
181 a better job. */
183 /* Bitmap of allocnos which should be colored. */
184 static bitmap coloring_allocno_bitmap;
186 /* Bitmap of allocnos which should be taken into account during
187 coloring. In general case it contains allocnos from
188 coloring_allocno_bitmap plus other already colored conflicting
189 allocnos. */
190 static bitmap consideration_allocno_bitmap;
192 /* All allocnos sorted according their priorities. */
193 static ira_allocno_t *sorted_allocnos;
195 /* Vec representing the stack of allocnos used during coloring. */
196 static vec<ira_allocno_t> allocno_stack_vec;
198 /* Helper for qsort comparison callbacks - return a positive integer if
199 X > Y, or a negative value otherwise. Use a conditional expression
200 instead of a difference computation to insulate from possible overflow
201 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
202 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
206 /* Definition of vector of allocno hard registers. */
208 /* Vector of unique allocno hard registers. */
209 static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
211 struct allocno_hard_regs_hasher : nofree_ptr_hash <allocno_hard_regs>
213 static inline hashval_t hash (const allocno_hard_regs *);
214 static inline bool equal (const allocno_hard_regs *,
215 const allocno_hard_regs *);
218 /* Returns hash value for allocno hard registers V. */
219 inline hashval_t
220 allocno_hard_regs_hasher::hash (const allocno_hard_regs *hv)
222 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
225 /* Compares allocno hard registers V1 and V2. */
226 inline bool
227 allocno_hard_regs_hasher::equal (const allocno_hard_regs *hv1,
228 const allocno_hard_regs *hv2)
230 return hard_reg_set_equal_p (hv1->set, hv2->set);
233 /* Hash table of unique allocno hard registers. */
234 static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;
236 /* Return allocno hard registers in the hash table equal to HV. */
237 static allocno_hard_regs_t
238 find_hard_regs (allocno_hard_regs_t hv)
240 return allocno_hard_regs_htab->find (hv);
243 /* Insert allocno hard registers HV in the hash table (if it is not
244 there yet) and return the value which in the table. */
245 static allocno_hard_regs_t
246 insert_hard_regs (allocno_hard_regs_t hv)
248 allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);
250 if (*slot == NULL)
251 *slot = hv;
252 return *slot;
255 /* Initialize data concerning allocno hard registers. */
256 static void
257 init_allocno_hard_regs (void)
259 allocno_hard_regs_vec.create (200);
260 allocno_hard_regs_htab
261 = new hash_table<allocno_hard_regs_hasher> (200);
264 /* Add (or update info about) allocno hard registers with SET and
265 COST. */
266 static allocno_hard_regs_t
267 add_allocno_hard_regs (HARD_REG_SET set, int64_t cost)
269 struct allocno_hard_regs temp;
270 allocno_hard_regs_t hv;
272 gcc_assert (! hard_reg_set_empty_p (set));
273 COPY_HARD_REG_SET (temp.set, set);
274 if ((hv = find_hard_regs (&temp)) != NULL)
275 hv->cost += cost;
276 else
278 hv = ((struct allocno_hard_regs *)
279 ira_allocate (sizeof (struct allocno_hard_regs)));
280 COPY_HARD_REG_SET (hv->set, set);
281 hv->cost = cost;
282 allocno_hard_regs_vec.safe_push (hv);
283 insert_hard_regs (hv);
285 return hv;
288 /* Finalize data concerning allocno hard registers. */
289 static void
290 finish_allocno_hard_regs (void)
292 int i;
293 allocno_hard_regs_t hv;
295 for (i = 0;
296 allocno_hard_regs_vec.iterate (i, &hv);
297 i++)
298 ira_free (hv);
299 delete allocno_hard_regs_htab;
300 allocno_hard_regs_htab = NULL;
301 allocno_hard_regs_vec.release ();
304 /* Sort hard regs according to their frequency of usage. */
305 static int
306 allocno_hard_regs_compare (const void *v1p, const void *v2p)
308 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
309 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
311 if (hv2->cost > hv1->cost)
312 return 1;
313 else if (hv2->cost < hv1->cost)
314 return -1;
315 else
316 return 0;
321 /* Used for finding a common ancestor of two allocno hard registers
322 nodes in the forest. We use the current value of
323 'node_check_tick' to mark all nodes from one node to the top and
324 then walking up from another node until we find a marked node.
326 It is also used to figure out allocno colorability as a mark that
327 we already reset value of member 'conflict_size' for the forest
328 node corresponding to the processed allocno. */
329 static int node_check_tick;
331 /* Roots of the forest containing hard register sets can be assigned
332 to allocnos. */
333 static allocno_hard_regs_node_t hard_regs_roots;
335 /* Definition of vector of allocno hard register nodes. */
337 /* Vector used to create the forest. */
338 static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
340 /* Create and return allocno hard registers node containing allocno
341 hard registers HV. */
342 static allocno_hard_regs_node_t
343 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
345 allocno_hard_regs_node_t new_node;
347 new_node = ((struct allocno_hard_regs_node *)
348 ira_allocate (sizeof (struct allocno_hard_regs_node)));
349 new_node->check = 0;
350 new_node->hard_regs = hv;
351 new_node->hard_regs_num = hard_reg_set_size (hv->set);
352 new_node->first = NULL;
353 new_node->used_p = false;
354 return new_node;
357 /* Add allocno hard registers node NEW_NODE to the forest on its level
358 given by ROOTS. */
359 static void
360 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
361 allocno_hard_regs_node_t new_node)
363 new_node->next = *roots;
364 if (new_node->next != NULL)
365 new_node->next->prev = new_node;
366 new_node->prev = NULL;
367 *roots = new_node;
370 /* Add allocno hard registers HV (or its best approximation if it is
371 not possible) to the forest on its level given by ROOTS. */
372 static void
373 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
374 allocno_hard_regs_t hv)
376 unsigned int i, start;
377 allocno_hard_regs_node_t node, prev, new_node;
378 HARD_REG_SET temp_set;
379 allocno_hard_regs_t hv2;
381 start = hard_regs_node_vec.length ();
382 for (node = *roots; node != NULL; node = node->next)
384 if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
385 return;
386 if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
388 add_allocno_hard_regs_to_forest (&node->first, hv);
389 return;
391 if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
392 hard_regs_node_vec.safe_push (node);
393 else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
395 COPY_HARD_REG_SET (temp_set, hv->set);
396 AND_HARD_REG_SET (temp_set, node->hard_regs->set);
397 hv2 = add_allocno_hard_regs (temp_set, hv->cost);
398 add_allocno_hard_regs_to_forest (&node->first, hv2);
401 if (hard_regs_node_vec.length ()
402 > start + 1)
404 /* Create a new node which contains nodes in hard_regs_node_vec. */
405 CLEAR_HARD_REG_SET (temp_set);
406 for (i = start;
407 i < hard_regs_node_vec.length ();
408 i++)
410 node = hard_regs_node_vec[i];
411 IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
413 hv = add_allocno_hard_regs (temp_set, hv->cost);
414 new_node = create_new_allocno_hard_regs_node (hv);
415 prev = NULL;
416 for (i = start;
417 i < hard_regs_node_vec.length ();
418 i++)
420 node = hard_regs_node_vec[i];
421 if (node->prev == NULL)
422 *roots = node->next;
423 else
424 node->prev->next = node->next;
425 if (node->next != NULL)
426 node->next->prev = node->prev;
427 if (prev == NULL)
428 new_node->first = node;
429 else
430 prev->next = node;
431 node->prev = prev;
432 node->next = NULL;
433 prev = node;
435 add_new_allocno_hard_regs_node_to_forest (roots, new_node);
437 hard_regs_node_vec.truncate (start);
440 /* Add allocno hard registers nodes starting with the forest level
441 given by FIRST which contains biggest set inside SET. */
442 static void
443 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
444 HARD_REG_SET set)
446 allocno_hard_regs_node_t node;
448 ira_assert (first != NULL);
449 for (node = first; node != NULL; node = node->next)
450 if (hard_reg_set_subset_p (node->hard_regs->set, set))
451 hard_regs_node_vec.safe_push (node);
452 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
453 collect_allocno_hard_regs_cover (node->first, set);
456 /* Set up field parent as PARENT in all allocno hard registers nodes
457 in forest given by FIRST. */
458 static void
459 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
460 allocno_hard_regs_node_t parent)
462 allocno_hard_regs_node_t node;
464 for (node = first; node != NULL; node = node->next)
466 node->parent = parent;
467 setup_allocno_hard_regs_nodes_parent (node->first, node);
471 /* Return allocno hard registers node which is a first common ancestor
472 node of FIRST and SECOND in the forest. */
473 static allocno_hard_regs_node_t
474 first_common_ancestor_node (allocno_hard_regs_node_t first,
475 allocno_hard_regs_node_t second)
477 allocno_hard_regs_node_t node;
479 node_check_tick++;
480 for (node = first; node != NULL; node = node->parent)
481 node->check = node_check_tick;
482 for (node = second; node != NULL; node = node->parent)
483 if (node->check == node_check_tick)
484 return node;
485 return first_common_ancestor_node (second, first);
488 /* Print hard reg set SET to F. */
489 static void
490 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
492 int i, start;
494 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
496 if (TEST_HARD_REG_BIT (set, i))
498 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
499 start = i;
501 if (start >= 0
502 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
504 if (start == i - 1)
505 fprintf (f, " %d", start);
506 else if (start == i - 2)
507 fprintf (f, " %d %d", start, start + 1);
508 else
509 fprintf (f, " %d-%d", start, i - 1);
510 start = -1;
513 if (new_line_p)
514 fprintf (f, "\n");
517 /* Print allocno hard register subforest given by ROOTS and its LEVEL
518 to F. */
519 static void
520 print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
521 int level)
523 int i;
524 allocno_hard_regs_node_t node;
526 for (node = roots; node != NULL; node = node->next)
528 fprintf (f, " ");
529 for (i = 0; i < level * 2; i++)
530 fprintf (f, " ");
531 fprintf (f, "%d:(", node->preorder_num);
532 print_hard_reg_set (f, node->hard_regs->set, false);
533 fprintf (f, ")@%" PRId64"\n", node->hard_regs->cost);
534 print_hard_regs_subforest (f, node->first, level + 1);
538 /* Print the allocno hard register forest to F. */
539 static void
540 print_hard_regs_forest (FILE *f)
542 fprintf (f, " Hard reg set forest:\n");
543 print_hard_regs_subforest (f, hard_regs_roots, 1);
546 /* Print the allocno hard register forest to stderr. */
547 void
548 ira_debug_hard_regs_forest (void)
550 print_hard_regs_forest (stderr);
553 /* Remove unused allocno hard registers nodes from forest given by its
554 *ROOTS. */
555 static void
556 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
558 allocno_hard_regs_node_t node, prev, next, last;
560 for (prev = NULL, node = *roots; node != NULL; node = next)
562 next = node->next;
563 if (node->used_p)
565 remove_unused_allocno_hard_regs_nodes (&node->first);
566 prev = node;
568 else
570 for (last = node->first;
571 last != NULL && last->next != NULL;
572 last = last->next)
574 if (last != NULL)
576 if (prev == NULL)
577 *roots = node->first;
578 else
579 prev->next = node->first;
580 if (next != NULL)
581 next->prev = last;
582 last->next = next;
583 next = node->first;
585 else
587 if (prev == NULL)
588 *roots = next;
589 else
590 prev->next = next;
591 if (next != NULL)
592 next->prev = prev;
594 ira_free (node);
599 /* Set up fields preorder_num starting with START_NUM in all allocno
600 hard registers nodes in forest given by FIRST. Return biggest set
601 PREORDER_NUM increased by 1. */
602 static int
603 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
604 allocno_hard_regs_node_t parent,
605 int start_num)
607 allocno_hard_regs_node_t node;
609 for (node = first; node != NULL; node = node->next)
611 node->preorder_num = start_num++;
612 node->parent = parent;
613 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
614 start_num);
616 return start_num;
619 /* Number of allocno hard registers nodes in the forest. */
620 static int allocno_hard_regs_nodes_num;
622 /* Table preorder number of allocno hard registers node in the forest
623 -> the allocno hard registers node. */
624 static allocno_hard_regs_node_t *allocno_hard_regs_nodes;
626 /* See below. */
627 typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;
629 /* The structure is used to describes all subnodes (not only immediate
630 ones) in the mentioned above tree for given allocno hard register
631 node. The usage of such data accelerates calculation of
632 colorability of given allocno. */
633 struct allocno_hard_regs_subnode
635 /* The conflict size of conflicting allocnos whose hard register
636 sets are equal sets (plus supersets if given node is given
637 allocno hard registers node) of one in the given node. */
638 int left_conflict_size;
639 /* The summary conflict size of conflicting allocnos whose hard
640 register sets are strict subsets of one in the given node.
641 Overall conflict size is
642 left_conflict_subnodes_size
643 + MIN (max_node_impact - left_conflict_subnodes_size,
644 left_conflict_size)
646 short left_conflict_subnodes_size;
647 short max_node_impact;
650 /* Container for hard regs subnodes of all allocnos. */
651 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;
653 /* Table (preorder number of allocno hard registers node in the
654 forest, preorder number of allocno hard registers subnode) -> index
655 of the subnode relative to the node. -1 if it is not a
656 subnode. */
657 static int *allocno_hard_regs_subnode_index;
659 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
660 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
661 static void
662 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
664 allocno_hard_regs_node_t node, parent;
665 int index;
667 for (node = first; node != NULL; node = node->next)
669 allocno_hard_regs_nodes[node->preorder_num] = node;
670 for (parent = node; parent != NULL; parent = parent->parent)
672 index = parent->preorder_num * allocno_hard_regs_nodes_num;
673 allocno_hard_regs_subnode_index[index + node->preorder_num]
674 = node->preorder_num - parent->preorder_num;
676 setup_allocno_hard_regs_subnode_index (node->first);
680 /* Count all allocno hard registers nodes in tree ROOT. */
681 static int
682 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
684 int len = 1;
686 for (root = root->first; root != NULL; root = root->next)
687 len += get_allocno_hard_regs_subnodes_num (root);
688 return len;
691 /* Build the forest of allocno hard registers nodes and assign each
692 allocno a node from the forest. */
693 static void
694 form_allocno_hard_regs_nodes_forest (void)
696 unsigned int i, j, size, len;
697 int start;
698 ira_allocno_t a;
699 allocno_hard_regs_t hv;
700 bitmap_iterator bi;
701 HARD_REG_SET temp;
702 allocno_hard_regs_node_t node, allocno_hard_regs_node;
703 allocno_color_data_t allocno_data;
705 node_check_tick = 0;
706 init_allocno_hard_regs ();
707 hard_regs_roots = NULL;
708 hard_regs_node_vec.create (100);
709 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
710 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
712 CLEAR_HARD_REG_SET (temp);
713 SET_HARD_REG_BIT (temp, i);
714 hv = add_allocno_hard_regs (temp, 0);
715 node = create_new_allocno_hard_regs_node (hv);
716 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
718 start = allocno_hard_regs_vec.length ();
719 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
721 a = ira_allocnos[i];
722 allocno_data = ALLOCNO_COLOR_DATA (a);
724 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
725 continue;
726 hv = (add_allocno_hard_regs
727 (allocno_data->profitable_hard_regs,
728 ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
730 SET_HARD_REG_SET (temp);
731 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
732 add_allocno_hard_regs (temp, 0);
733 qsort (allocno_hard_regs_vec.address () + start,
734 allocno_hard_regs_vec.length () - start,
735 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
736 for (i = start;
737 allocno_hard_regs_vec.iterate (i, &hv);
738 i++)
740 add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
741 ira_assert (hard_regs_node_vec.length () == 0);
743 /* We need to set up parent fields for right work of
744 first_common_ancestor_node. */
745 setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
746 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
748 a = ira_allocnos[i];
749 allocno_data = ALLOCNO_COLOR_DATA (a);
750 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
751 continue;
752 hard_regs_node_vec.truncate (0);
753 collect_allocno_hard_regs_cover (hard_regs_roots,
754 allocno_data->profitable_hard_regs);
755 allocno_hard_regs_node = NULL;
756 for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
757 allocno_hard_regs_node
758 = (j == 0
759 ? node
760 : first_common_ancestor_node (node, allocno_hard_regs_node));
761 /* That is a temporary storage. */
762 allocno_hard_regs_node->used_p = true;
763 allocno_data->hard_regs_node = allocno_hard_regs_node;
765 ira_assert (hard_regs_roots->next == NULL);
766 hard_regs_roots->used_p = true;
767 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
768 allocno_hard_regs_nodes_num
769 = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
770 allocno_hard_regs_nodes
771 = ((allocno_hard_regs_node_t *)
772 ira_allocate (allocno_hard_regs_nodes_num
773 * sizeof (allocno_hard_regs_node_t)));
774 size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
775 allocno_hard_regs_subnode_index
776 = (int *) ira_allocate (size * sizeof (int));
777 for (i = 0; i < size; i++)
778 allocno_hard_regs_subnode_index[i] = -1;
779 setup_allocno_hard_regs_subnode_index (hard_regs_roots);
780 start = 0;
781 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
783 a = ira_allocnos[i];
784 allocno_data = ALLOCNO_COLOR_DATA (a);
785 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
786 continue;
787 len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
788 allocno_data->hard_regs_subnodes_start = start;
789 allocno_data->hard_regs_subnodes_num = len;
790 start += len;
792 allocno_hard_regs_subnodes
793 = ((allocno_hard_regs_subnode_t)
794 ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
795 hard_regs_node_vec.release ();
798 /* Free tree of allocno hard registers nodes given by its ROOT. */
799 static void
800 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
802 allocno_hard_regs_node_t child, next;
804 for (child = root->first; child != NULL; child = next)
806 next = child->next;
807 finish_allocno_hard_regs_nodes_tree (child);
809 ira_free (root);
812 /* Finish work with the forest of allocno hard registers nodes. */
813 static void
814 finish_allocno_hard_regs_nodes_forest (void)
816 allocno_hard_regs_node_t node, next;
818 ira_free (allocno_hard_regs_subnodes);
819 for (node = hard_regs_roots; node != NULL; node = next)
821 next = node->next;
822 finish_allocno_hard_regs_nodes_tree (node);
824 ira_free (allocno_hard_regs_nodes);
825 ira_free (allocno_hard_regs_subnode_index);
826 finish_allocno_hard_regs ();
829 /* Set up left conflict sizes and left conflict subnodes sizes of hard
830 registers subnodes of allocno A. Return TRUE if allocno A is
831 trivially colorable. */
832 static bool
833 setup_left_conflict_sizes_p (ira_allocno_t a)
835 int i, k, nobj, start;
836 int conflict_size, left_conflict_subnodes_size, node_preorder_num;
837 allocno_color_data_t data;
838 HARD_REG_SET profitable_hard_regs;
839 allocno_hard_regs_subnode_t subnodes;
840 allocno_hard_regs_node_t node;
841 HARD_REG_SET node_set;
843 nobj = ALLOCNO_NUM_OBJECTS (a);
844 data = ALLOCNO_COLOR_DATA (a);
845 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
846 COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
847 node = data->hard_regs_node;
848 node_preorder_num = node->preorder_num;
849 COPY_HARD_REG_SET (node_set, node->hard_regs->set);
850 node_check_tick++;
851 for (k = 0; k < nobj; k++)
853 ira_object_t obj = ALLOCNO_OBJECT (a, k);
854 ira_object_t conflict_obj;
855 ira_object_conflict_iterator oci;
857 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
859 int size;
860 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
861 allocno_hard_regs_node_t conflict_node, temp_node;
862 HARD_REG_SET conflict_node_set;
863 allocno_color_data_t conflict_data;
865 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
866 if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
867 || ! hard_reg_set_intersect_p (profitable_hard_regs,
868 conflict_data
869 ->profitable_hard_regs))
870 continue;
871 conflict_node = conflict_data->hard_regs_node;
872 COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
873 if (hard_reg_set_subset_p (node_set, conflict_node_set))
874 temp_node = node;
875 else
877 ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
878 temp_node = conflict_node;
880 if (temp_node->check != node_check_tick)
882 temp_node->check = node_check_tick;
883 temp_node->conflict_size = 0;
885 size = (ira_reg_class_max_nregs
886 [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
887 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
888 /* We will deal with the subwords individually. */
889 size = 1;
890 temp_node->conflict_size += size;
893 for (i = 0; i < data->hard_regs_subnodes_num; i++)
895 allocno_hard_regs_node_t temp_node;
897 temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
898 ira_assert (temp_node->preorder_num == i + node_preorder_num);
899 subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
900 ? 0 : temp_node->conflict_size);
901 if (hard_reg_set_subset_p (temp_node->hard_regs->set,
902 profitable_hard_regs))
903 subnodes[i].max_node_impact = temp_node->hard_regs_num;
904 else
906 HARD_REG_SET temp_set;
907 int j, n, hard_regno;
908 enum reg_class aclass;
910 COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
911 AND_HARD_REG_SET (temp_set, profitable_hard_regs);
912 aclass = ALLOCNO_CLASS (a);
913 for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
915 hard_regno = ira_class_hard_regs[aclass][j];
916 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
917 n++;
919 subnodes[i].max_node_impact = n;
921 subnodes[i].left_conflict_subnodes_size = 0;
923 start = node_preorder_num * allocno_hard_regs_nodes_num;
924 for (i = data->hard_regs_subnodes_num - 1; i > 0; i--)
926 int size, parent_i;
927 allocno_hard_regs_node_t parent;
929 size = (subnodes[i].left_conflict_subnodes_size
930 + MIN (subnodes[i].max_node_impact
931 - subnodes[i].left_conflict_subnodes_size,
932 subnodes[i].left_conflict_size));
933 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
934 gcc_checking_assert(parent);
935 parent_i
936 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
937 gcc_checking_assert(parent_i >= 0);
938 subnodes[parent_i].left_conflict_subnodes_size += size;
940 left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
941 conflict_size
942 = (left_conflict_subnodes_size
943 + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
944 subnodes[0].left_conflict_size));
945 conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
946 data->colorable_p = conflict_size <= data->available_regs_num;
947 return data->colorable_p;
/* Update left conflict sizes of hard registers subnodes of allocno A
   after removing allocno REMOVED_A with SIZE from the conflict graph.
   Return TRUE if A is trivially colorable.  */
static bool
update_left_conflict_sizes_p (ira_allocno_t a,
			      ira_allocno_t removed_a, int size)
{
  int i, conflict_size, before_conflict_size, diff, start;
  int node_preorder_num, parent_i;
  allocno_hard_regs_node_t node, removed_node, parent;
  allocno_hard_regs_subnode_t subnodes;
  allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);

  /* Already-colorable allocnos are not kept in the uncolorable bucket,
     so they must never reach this function.  */
  ira_assert (! data->colorable_p);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
  /* The forest structure guarantees the two hard-reg sets nest one way
     or the other.  */
  ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
				     node->hard_regs->set)
	      || hard_reg_set_subset_p (node->hard_regs->set,
					removed_node->hard_regs->set));
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
  if (i < 0)
    i = 0;
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  /* Conflict size of a subnode is its own subtree contribution plus
     whatever of its direct conflicts still fits under its impact cap.  */
  before_conflict_size
    = (subnodes[i].left_conflict_subnodes_size
       + MIN (subnodes[i].max_node_impact
	      - subnodes[i].left_conflict_subnodes_size,
	      subnodes[i].left_conflict_size));
  subnodes[i].left_conflict_size -= size;
  /* Propagate the decrease up toward the root; stop as soon as a level
     absorbs the change (diff == 0) or we run off the subnode tree.  */
  for (;;)
    {
      conflict_size
	= (subnodes[i].left_conflict_subnodes_size
	   + MIN (subnodes[i].max_node_impact
		  - subnodes[i].left_conflict_subnodes_size,
		  subnodes[i].left_conflict_size));
      if ((diff = before_conflict_size - conflict_size) == 0)
	break;
      ira_assert (conflict_size < before_conflict_size);
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      if (parent == NULL)
	break;
      parent_i
	= allocno_hard_regs_subnode_index[start + parent->preorder_num];
      if (parent_i < 0)
	break;
      i = parent_i;
      /* Snapshot the parent's conflict size BEFORE applying the child's
	 diff, so the next iteration can measure the parent's change.  */
      before_conflict_size
	= (subnodes[i].left_conflict_subnodes_size
	   + MIN (subnodes[i].max_node_impact
		  - subnodes[i].left_conflict_subnodes_size,
		  subnodes[i].left_conflict_size));
      subnodes[i].left_conflict_subnodes_size -= diff;
    }
  /* A is trivially colorable only if the change reached the root
     (i == 0) and the root conflict size plus A's own register need
     fits in the available registers.  */
  if (i != 0
      || (conflict_size
	  + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
	  > data->available_regs_num))
    return false;
  data->colorable_p = true;
  return true;
}
1016 /* Return true if allocno A has empty profitable hard regs. */
1017 static bool
1018 empty_profitable_hard_regs (ira_allocno_t a)
1020 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1022 return hard_reg_set_empty_p (data->profitable_hard_regs);
/* Set up profitable hard registers for each allocno being colored.
   Three passes: (1) seed each allocno's profitable set from its class
   and explicit conflicts, (2) remove hard regs already taken by
   assigned conflicting objects, (3) remove hard regs that cost more
   than spilling to memory.  */
static void
setup_profitable_hard_regs (void)
{
  unsigned int i;
  int j, k, nobj, hard_regno, nregs, class_size;
  ira_allocno_t a;
  bitmap_iterator bi;
  enum reg_class aclass;
  machine_mode mode;
  allocno_color_data_t data;

  /* Initial set up from allocno classes and explicitly conflicting
     hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      data = ALLOCNO_COLOR_DATA (a);
      /* If memory is cheaper than any register (and costs were never
	 updated), no register is profitable at all.  */
      if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
	  && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a)
	  /* Do not empty profitable regs for static chain pointer
	     pseudo when non-local goto is used.  */
	  && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
	CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      else
	{
	  mode = ALLOCNO_MODE (a);
	  COPY_HARD_REG_SET (data->profitable_hard_regs,
			     ira_useful_class_mode_regs[aclass][mode]);
	  nobj = ALLOCNO_NUM_OBJECTS (a);
	  for (k = 0; k < nobj; k++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, k);

	      AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
				      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
	    }
	}
    }
  /* Exclude hard regs already assigned for conflicting objects.  */
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
	  || ! ALLOCNO_ASSIGNED_P (a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
	continue;
      mode = ALLOCNO_MODE (a);
      nregs = hard_regno_nregs[hard_regno][mode];
      nobj = ALLOCNO_NUM_OBJECTS (a);
      for (k = 0; k < nobj; k++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, k);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      /* We can process the conflict allocno repeatedly with
		 the same result.  */
	      if (nregs == nobj && nregs > 1)
		{
		  /* One register per object: only the subword's own
		     register conflicts, honoring word endianness.  */
		  int num = OBJECT_SUBWORD (conflict_obj);

		  if (REG_WORDS_BIG_ENDIAN)
		    CLEAR_HARD_REG_BIT
		      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		       hard_regno + nobj - num - 1);
		  else
		    CLEAR_HARD_REG_BIT
		      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		       hard_regno + num);
		}
	      else
		/* Otherwise the whole multi-register footprint
		   conflicts.  */
		AND_COMPL_HARD_REG_SET
		  (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		   ira_reg_mode_hard_regset[hard_regno][mode]);
	    }
	}
    }
  /* Exclude too costly hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int min_cost = INT_MAX;
      int *costs;

      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
	  || empty_profitable_hard_regs (a))
	continue;
      data = ALLOCNO_COLOR_DATA (a);
      mode = ALLOCNO_MODE (a);
      /* Prefer updated costs; fall back to the original ones.  */
      if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
	  || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
	{
	  class_size = ira_class_hard_regs_num[aclass];
	  for (j = 0; j < class_size; j++)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
				       hard_regno))
		continue;
	      if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j]
		  /* Do not remove HARD_REGNO for static chain pointer
		     pseudo when non-local goto is used.  */
		  && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
		CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
				    hard_regno);
	      else if (min_cost > costs[j])
		min_cost = costs[j];
	    }
	}
      else if (ALLOCNO_UPDATED_MEMORY_COST (a)
	       < ALLOCNO_UPDATED_CLASS_COST (a)
	       /* Do not empty profitable regs for static chain
		  pointer pseudo when non-local goto is used.  */
	       && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
	CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      /* Tighten the class cost to the cheapest surviving register.  */
      if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
	ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
    }
}
1155 /* This page contains functions used to choose hard registers for
1156 allocnos. */
1158 /* Pool for update cost records. */
1159 static object_allocator<update_cost_record> update_cost_record_pool
1160 ("update cost records", 100);
1162 /* Return new update cost record with given params. */
1163 static struct update_cost_record *
1164 get_update_cost_record (int hard_regno, int divisor,
1165 struct update_cost_record *next)
1167 struct update_cost_record *record;
1169 record = update_cost_record_pool.allocate ();
1170 record->hard_regno = hard_regno;
1171 record->divisor = divisor;
1172 record->next = next;
1173 return record;
1176 /* Free memory for all records in LIST. */
1177 static void
1178 free_update_cost_record_list (struct update_cost_record *list)
1180 struct update_cost_record *next;
1182 while (list != NULL)
1184 next = list->next;
1185 update_cost_record_pool.remove (list);
1186 list = next;
/* Free memory allocated for all update cost records.  */
static void
finish_update_cost_records (void)
{
  /* Releasing the pool frees all records at once; no per-record walk
     is needed.  */
  update_cost_record_pool.release ();
}
1197 /* Array whose element value is TRUE if the corresponding hard
1198 register was already allocated for an allocno. */
1199 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
/* Describes one element in a queue of allocnos whose costs need to be
   updated.  Each allocno in the queue is known to have an allocno
   class.  */
struct update_cost_queue_elem
{
  /* This element is in the queue iff CHECK == update_cost_check.  */
  int check;

  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
     connecting this allocno to the one being allocated.  */
  int divisor;

  /* Allocno from which we are chaining costs of connected allocnos.
     It is used to avoid walking back in the graph of allocnos
     connected by copies.  */
  ira_allocno_t from;

  /* The next allocno in the queue, or null if this is the last element.  */
  ira_allocno_t next;
};
1222 /* The first element in a queue of allocnos whose copy costs need to be
1223 updated. Null if the queue is empty. */
1224 static ira_allocno_t update_cost_queue;
1226 /* The last element in the queue described by update_cost_queue.
1227 Not valid if update_cost_queue is null. */
1228 static struct update_cost_queue_elem *update_cost_queue_tail;
1230 /* A pool of elements in the queue described by update_cost_queue.
1231 Elements are indexed by ALLOCNO_NUM. */
1232 static struct update_cost_queue_elem *update_cost_queue_elems;
1234 /* The current value of update_costs_from_copies call count. */
1235 static int update_cost_check;
1237 /* Allocate and initialize data necessary for function
1238 update_costs_from_copies. */
1239 static void
1240 initiate_cost_update (void)
1242 size_t size;
1244 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1245 update_cost_queue_elems
1246 = (struct update_cost_queue_elem *) ira_allocate (size);
1247 memset (update_cost_queue_elems, 0, size);
1248 update_cost_check = 0;
/* Deallocate data used by function update_costs_from_copies.  */
static void
finish_cost_update (void)
{
  ira_free (update_cost_queue_elems);
  /* Also release the pool backing the update cost records.  */
  finish_update_cost_records ();
}
1259 /* When we traverse allocnos to update hard register costs, the cost
1260 divisor will be multiplied by the following macro value for each
1261 hop from given allocno to directly connected allocnos. */
1262 #define COST_HOP_DIVISOR 4
/* Start a new cost-updating pass.  */
static void
start_update_cost (void)
{
  /* Bumping the generation counter lazily invalidates every queue
     element (see update_cost_queue_elem::check); clearing the head
     empties the queue itself.  */
  update_cost_check++;
  update_cost_queue = NULL;
}
1272 /* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1273 ALLOCNO is already in the queue, or has NO_REGS class. */
1274 static inline void
1275 queue_update_cost (ira_allocno_t allocno, ira_allocno_t from, int divisor)
1277 struct update_cost_queue_elem *elem;
1279 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1280 if (elem->check != update_cost_check
1281 && ALLOCNO_CLASS (allocno) != NO_REGS)
1283 elem->check = update_cost_check;
1284 elem->from = from;
1285 elem->divisor = divisor;
1286 elem->next = NULL;
1287 if (update_cost_queue == NULL)
1288 update_cost_queue = allocno;
1289 else
1290 update_cost_queue_tail->next = allocno;
1291 update_cost_queue_tail = elem;
1295 /* Try to remove the first element from update_cost_queue. Return
1296 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1297 *DIVISOR) describe the removed element. */
1298 static inline bool
1299 get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *from, int *divisor)
1301 struct update_cost_queue_elem *elem;
1303 if (update_cost_queue == NULL)
1304 return false;
1306 *allocno = update_cost_queue;
1307 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1308 *from = elem->from;
1309 *divisor = elem->divisor;
1310 update_cost_queue = elem->next;
1311 return true;
/* Increase costs of HARD_REGNO by UPDATE_COST for ALLOCNO.  Return
   true if we really modified the cost.  */
static bool
update_allocno_cost (ira_allocno_t allocno, int hard_regno, int update_cost)
{
  int i;
  enum reg_class aclass = ALLOCNO_CLASS (allocno);

  /* HARD_REGNO outside ALLOCNO's class: nothing to update.  */
  i = ira_class_hard_reg_index[aclass][hard_regno];
  if (i < 0)
    return false;
  /* Lazily materialize the updated cost vectors (copying from the
     base vectors) before modifying entry I.  */
  ira_allocate_and_set_or_copy_costs
    (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno), aclass,
     ALLOCNO_UPDATED_CLASS_COST (allocno),
     ALLOCNO_HARD_REG_COSTS (allocno));
  ira_allocate_and_set_or_copy_costs
    (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno),
     aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno));
  ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)[i] += update_cost;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)[i] += update_cost;
  return true;
}
/* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
   by copies to ALLOCNO to increase chances to remove some copies as
   the result of subsequent assignment.  Record cost updates if
   RECORD_P is true.  */
static void
update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
			   int divisor, bool decr_p, bool record_p)
{
  int cost, update_cost;
  machine_mode mode;
  enum reg_class rclass, aclass;
  ira_allocno_t another_allocno, from = NULL;
  ira_copy_t cp, next_cp;

  rclass = REGNO_REG_CLASS (hard_regno);
  /* Worklist loop: process ALLOCNO, then every allocno that
     queue_update_cost below added to the shared update queue.  */
  do
    {
      mode = ALLOCNO_MODE (allocno);
      ira_init_register_move_cost_if_necessary (mode);
      for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
	{
	  /* A copy links two allocnos; pick the one on the other
	     end.  */
	  if (cp->first == allocno)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      another_allocno = cp->second;
	    }
	  else if (cp->second == allocno)
	    {
	      next_cp = cp->next_second_allocno_copy;
	      another_allocno = cp->first;
	    }
	  else
	    gcc_unreachable ();

	  /* Don't walk back along the edge we arrived through.  */
	  if (another_allocno == from)
	    continue;

	  aclass = ALLOCNO_CLASS (another_allocno);
	  if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
				   hard_regno)
	      || ALLOCNO_ASSIGNED_P (another_allocno))
	    continue;

	  /* Move cost direction depends on which side of the copy
	     ALLOCNO is.  */
	  cost = (cp->second == allocno
		  ? ira_register_move_cost[mode][rclass][aclass]
		  : ira_register_move_cost[mode][aclass][rclass]);
	  if (decr_p)
	    cost = -cost;

	  /* Attenuate by copy frequency and distance (DIVISOR grows by
	     COST_HOP_DIVISOR per hop).  */
	  update_cost = cp->freq * cost / divisor;
	  if (update_cost == 0)
	    continue;

	  if (! update_allocno_cost (another_allocno, hard_regno, update_cost))
	    continue;
	  queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
	  /* Remember the update so restore_costs_from_copies can undo
	     it later.  */
	  if (record_p && ALLOCNO_COLOR_DATA (another_allocno) != NULL)
	    ALLOCNO_COLOR_DATA (another_allocno)->update_cost_records
	      = get_update_cost_record (hard_regno, divisor,
					ALLOCNO_COLOR_DATA (another_allocno)
					->update_cost_records);
	}
    }
  while (get_next_update_cost (&allocno, &from, &divisor));
}
1403 /* Decrease preferred ALLOCNO hard register costs and costs of
1404 allocnos connected to ALLOCNO through copy. */
1405 static void
1406 update_costs_from_prefs (ira_allocno_t allocno)
1408 ira_pref_t pref;
1410 start_update_cost ();
1411 for (pref = ALLOCNO_PREFS (allocno); pref != NULL; pref = pref->next_pref)
1412 update_costs_from_allocno (allocno, pref->hard_regno,
1413 COST_HOP_DIVISOR, true, true);
1416 /* Update (decrease if DECR_P) the cost of allocnos connected to
1417 ALLOCNO through copies to increase chances to remove some copies as
1418 the result of subsequent assignment. ALLOCNO was just assigned to
1419 a hard register. Record cost updates if RECORD_P is true. */
1420 static void
1421 update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
1423 int hard_regno;
1425 hard_regno = ALLOCNO_HARD_REGNO (allocno);
1426 ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1427 start_update_cost ();
1428 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
1431 /* Restore costs of allocnos connected to ALLOCNO by copies as it was
1432 before updating costs of these allocnos from given allocno. This
1433 is a wise thing to do as if given allocno did not get an expected
1434 hard reg, using smaller cost of the hard reg for allocnos connected
1435 by copies to given allocno becomes actually misleading. Free all
1436 update cost records for ALLOCNO as we don't need them anymore. */
1437 static void
1438 restore_costs_from_copies (ira_allocno_t allocno)
1440 struct update_cost_record *records, *curr;
1442 if (ALLOCNO_COLOR_DATA (allocno) == NULL)
1443 return;
1444 records = ALLOCNO_COLOR_DATA (allocno)->update_cost_records;
1445 start_update_cost ();
1446 for (curr = records; curr != NULL; curr = curr->next)
1447 update_costs_from_allocno (allocno, curr->hard_regno,
1448 curr->divisor, true, false);
1449 free_update_cost_record_list (records);
1450 ALLOCNO_COLOR_DATA (allocno)->update_cost_records = NULL;
/* This function updates COSTS (decrease if DECR_P) for hard_registers
   of ACLASS by conflict costs of the unassigned allocnos
   connected by copies with allocnos in update_cost_queue.  This
   update increases chances to remove some copies.  */
static void
update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
				  bool decr_p)
{
  int i, cost, class_size, freq, mult, div, divisor;
  int index, hard_regno;
  int *conflict_costs;
  bool cont_p;
  enum reg_class another_aclass;
  ira_allocno_t allocno, another_allocno, from;
  ira_copy_t cp, next_cp;

  /* Drain the queue seeded by the caller; queue_update_cost below may
     extend it while we iterate.  */
  while (get_next_update_cost (&allocno, &from, &divisor))
    for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
      {
	/* Pick the allocno on the other end of the copy.  */
	if (cp->first == allocno)
	  {
	    next_cp = cp->next_first_allocno_copy;
	    another_allocno = cp->second;
	  }
	else if (cp->second == allocno)
	  {
	    next_cp = cp->next_second_allocno_copy;
	    another_allocno = cp->first;
	  }
	else
	  gcc_unreachable ();

	/* Don't walk back along the edge we arrived through.  */
	if (another_allocno == from)
	  continue;

	another_aclass = ALLOCNO_CLASS (another_allocno);
	if (! ira_reg_classes_intersect_p[aclass][another_aclass]
	    || ALLOCNO_ASSIGNED_P (another_allocno)
	    || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
	  continue;
	class_size = ira_class_hard_regs_num[another_aclass];
	ira_allocate_and_copy_costs
	  (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
	   another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
	conflict_costs
	  = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
	/* NULL conflict costs mean "all equal": nothing to add here,
	   but keep propagating (cont_p).  */
	if (conflict_costs == NULL)
	  cont_p = true;
	else
	  {
	    mult = cp->freq;
	    freq = ALLOCNO_FREQ (another_allocno);
	    if (freq == 0)
	      freq = 1;
	    /* Scale by copy frequency over allocno frequency, further
	       attenuated by the hop-distance DIVISOR.  */
	    div = freq * divisor;
	    cont_p = false;
	    for (i = class_size - 1; i >= 0; i--)
	      {
		hard_regno = ira_class_hard_regs[another_aclass][i];
		ira_assert (hard_regno >= 0);
		index = ira_class_hard_reg_index[aclass][hard_regno];
		if (index < 0)
		  continue;
		cost = (int) ((unsigned) conflict_costs [i] * mult) / div;
		if (cost == 0)
		  continue;
		/* A nonzero contribution justifies exploring one hop
		   further.  */
		cont_p = true;
		if (decr_p)
		  cost = -cost;
		costs[index] += cost;
	      }
	  }
	/* Probably 5 hops will be enough.  */
	if (cont_p
	    && divisor <= (COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR))
	  queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
      }
}
1535 /* Set up conflicting (through CONFLICT_REGS) for each object of
1536 allocno A and the start allocno profitable regs (through
1537 START_PROFITABLE_REGS). Remember that the start profitable regs
1538 exclude hard regs which can not hold value of mode of allocno A.
1539 This covers mostly cases when multi-register value should be
1540 aligned. */
1541 static inline void
1542 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1543 HARD_REG_SET *conflict_regs,
1544 HARD_REG_SET *start_profitable_regs)
1546 int i, nwords;
1547 ira_object_t obj;
1549 nwords = ALLOCNO_NUM_OBJECTS (a);
1550 for (i = 0; i < nwords; i++)
1552 obj = ALLOCNO_OBJECT (a, i);
1553 COPY_HARD_REG_SET (conflict_regs[i],
1554 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1556 if (retry_p)
1558 COPY_HARD_REG_SET (*start_profitable_regs,
1559 reg_class_contents[ALLOCNO_CLASS (a)]);
1560 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1561 ira_prohibited_class_mode_regs
1562 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1564 else
1565 COPY_HARD_REG_SET (*start_profitable_regs,
1566 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
/* Return true if HARD_REGNO is ok for assigning to allocno A with
   PROFITABLE_REGS and whose objects have CONFLICT_REGS.  */
static inline bool
check_hard_reg_p (ira_allocno_t a, int hard_regno,
		  HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
{
  int j, nwords, nregs;
  enum reg_class aclass;
  machine_mode mode;

  aclass = ALLOCNO_CLASS (a);
  mode = ALLOCNO_MODE (a);
  if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
			 hard_regno))
    return false;
  /* Checking only profitable hard regs.  */
  if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
    return false;
  nregs = hard_regno_nregs[hard_regno][mode];
  nwords = ALLOCNO_NUM_OBJECTS (a);
  /* Each of the NREGS hard registers must be free of conflicts in the
     object word(s) it would serve.  */
  for (j = 0; j < nregs; j++)
    {
      int k;
      int set_to_test_start = 0, set_to_test_end = nwords;

      if (nregs == nwords)
	{
	  /* One register per word: test only the word this register
	     maps to, honoring word endianness.  */
	  if (REG_WORDS_BIG_ENDIAN)
	    set_to_test_start = nwords - j - 1;
	  else
	    set_to_test_start = j;
	  set_to_test_end = set_to_test_start + 1;
	}
      for (k = set_to_test_start; k < set_to_test_end; k++)
	if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
	  break;
      if (k != set_to_test_end)
	break;
    }
  /* True iff no conflict was found for any of the NREGS registers.  */
  return j == nregs;
}
1611 /* Return number of registers needed to be saved and restored at
1612 function prologue/epilogue if we allocate HARD_REGNO to hold value
1613 of MODE. */
1614 static int
1615 calculate_saved_nregs (int hard_regno, machine_mode mode)
1617 int i;
1618 int nregs = 0;
1620 ira_assert (hard_regno >= 0);
1621 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1622 if (!allocated_hardreg_p[hard_regno + i]
1623 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1624 && !LOCAL_REGNO (hard_regno + i))
1625 nregs++;
1626 return nregs;
/* Choose a hard register for allocno A.  If RETRY_P is TRUE, it means
   that the function called from function
   `ira_reassign_conflict_allocnos' and `allocno_reload_assign'.  In
   this case some allocno data are not defined or updated and we
   should not touch these data.  The function returns true if we
   managed to assign a hard register to the allocno.

   To assign a hard register, first of all we calculate all conflict
   hard registers which can come from conflicting allocnos with
   already assigned hard registers.  After that we find first free
   hard register with the minimal cost.  During hard register cost
   calculation we take conflict hard register costs into account to
   give a chance for conflicting allocnos to get a better hard
   register in the future.

   If the best hard register cost is bigger than cost of memory usage
   for the allocno, we don't assign a hard register to given allocno
   at all.

   If we assign a hard register to the allocno, we update costs of the
   hard register for allocnos connected by copies to improve a chance
   to coalesce insns represented by the copies when we assign hard
   registers to the allocnos connected by the copies.  */
static bool
assign_hard_reg (ira_allocno_t a, bool retry_p)
{
  HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
  int i, j, hard_regno, best_hard_regno, class_size;
  int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
  int *a_costs;
  enum reg_class aclass;
  machine_mode mode;
  static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
  int saved_nregs;
  enum reg_class rclass;
  int add_cost;
#ifdef STACK_REGS
  bool no_stack_reg_p;
#endif

  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  get_conflict_and_start_profitable_regs (a, retry_p,
					  conflicting_regs,
					  &profitable_hard_regs);
  aclass = ALLOCNO_CLASS (a);
  class_size = ira_class_hard_regs_num[aclass];
  best_hard_regno = -1;
  /* NOTE(review): full_costs is zeroed here and again two lines below;
     the first memset looks redundant -- confirm before removing.  */
  memset (full_costs, 0, sizeof (int) * class_size);
  mem_cost = 0;
  memset (costs, 0, sizeof (int) * class_size);
  memset (full_costs, 0, sizeof (int) * class_size);
#ifdef STACK_REGS
  no_stack_reg_p = false;
#endif
  if (! retry_p)
    start_update_cost ();
  mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);

  ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
			       aclass, ALLOCNO_HARD_REG_COSTS (a));
  a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
#ifdef STACK_REGS
  no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
#endif
  cost = ALLOCNO_UPDATED_CLASS_COST (a);
  /* Seed both cost vectors: per-register costs when available,
     otherwise the uniform class cost.  */
  for (i = 0; i < class_size; i++)
    if (a_costs != NULL)
      {
	costs[i] += a_costs[i];
	full_costs[i] += a_costs[i];
      }
    else
      {
	costs[i] += cost;
	full_costs[i] += cost;
      }
  nwords = ALLOCNO_NUM_OBJECTS (a);
  curr_allocno_process++;
  for (word = 0; word < nwords; word++)
    {
      ira_object_t conflict_obj;
      ira_object_t obj = ALLOCNO_OBJECT (a, word);
      ira_object_conflict_iterator oci;

      /* Take preferences of conflicting allocnos into account.  */
      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	  enum reg_class conflict_aclass;
	  allocno_color_data_t data = ALLOCNO_COLOR_DATA (conflict_a);

	  /* Reload can give another class so we need to check all
	     allocnos.  */
	  if (!retry_p
	      && ((!ALLOCNO_ASSIGNED_P (conflict_a)
		   || ALLOCNO_HARD_REGNO (conflict_a) < 0)
		  && !(hard_reg_set_intersect_p
		       (profitable_hard_regs,
			ALLOCNO_COLOR_DATA
			(conflict_a)->profitable_hard_regs))))
	    {
	      /* All conflict allocnos are in consideration bitmap
		 when retry_p is false.  It might change in future and
		 if it happens the assert will be broken.  It means
		 the code should be modified for the new
		 assumptions.  */
	      ira_assert (bitmap_bit_p (consideration_allocno_bitmap,
					ALLOCNO_NUM (conflict_a)));
	      continue;
	    }
	  conflict_aclass = ALLOCNO_CLASS (conflict_a);
	  ira_assert (ira_reg_classes_intersect_p
		      [aclass][conflict_aclass]);
	  if (ALLOCNO_ASSIGNED_P (conflict_a))
	    {
	      /* Already-assigned conflicts simply forbid hard regs.  */
	      hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
	      if (hard_regno >= 0
		  && (ira_hard_reg_set_intersection_p
		      (hard_regno, ALLOCNO_MODE (conflict_a),
		       reg_class_contents[aclass])))
		{
		  int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
		  int conflict_nregs;

		  mode = ALLOCNO_MODE (conflict_a);
		  conflict_nregs = hard_regno_nregs[hard_regno][mode];
		  if (conflict_nregs == n_objects && conflict_nregs > 1)
		    {
		      /* One register per object: only this subword's
			 register conflicts, honoring endianness.  */
		      int num = OBJECT_SUBWORD (conflict_obj);

		      if (REG_WORDS_BIG_ENDIAN)
			SET_HARD_REG_BIT (conflicting_regs[word],
					  hard_regno + n_objects - num - 1);
		      else
			SET_HARD_REG_BIT (conflicting_regs[word],
					  hard_regno + num);
		    }
		  else
		    IOR_HARD_REG_SET
		      (conflicting_regs[word],
		       ira_reg_mode_hard_regset[hard_regno][mode]);
		  /* Every profitable reg is now conflicting: no
		     assignment is possible.  */
		  if (hard_reg_set_subset_p (profitable_hard_regs,
					     conflicting_regs[word]))
		    goto fail;
		}
	    }
	  else if (! retry_p
		   && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
		   /* Don't process the conflict allocno twice.  */
		   && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
		       != curr_allocno_process))
	    {
	      /* Unassigned, unspillable conflict: bias our costs away
		 from the registers it prefers.  */
	      int k, *conflict_costs;

	      ALLOCNO_COLOR_DATA (conflict_a)->last_process
		= curr_allocno_process;
	      ira_allocate_and_copy_costs
		(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
		 conflict_aclass,
		 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
	      conflict_costs
		= ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
	      if (conflict_costs != NULL)
		for (j = class_size - 1; j >= 0; j--)
		  {
		    hard_regno = ira_class_hard_regs[aclass][j];
		    ira_assert (hard_regno >= 0);
		    k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
		    if (k < 0
			/* If HARD_REGNO is not available for CONFLICT_A,
			   the conflict would be ignored, since HARD_REGNO
			   will never be assigned to CONFLICT_A.  */
			|| !TEST_HARD_REG_BIT (data->profitable_hard_regs,
					       hard_regno))
		      continue;
		    full_costs[j] -= conflict_costs[k];
		  }
	      queue_update_cost (conflict_a, NULL, COST_HOP_DIVISOR);
	    }
	}
    }
  if (! retry_p)
    /* Take into account preferences of allocnos connected by copies to
       the conflict allocnos.  */
    update_conflict_hard_regno_costs (full_costs, aclass, true);

  /* Take preferences of allocnos connected by copies into
     account.  */
  if (! retry_p)
    {
      start_update_cost ();
      queue_update_cost (a, NULL, COST_HOP_DIVISOR);
      update_conflict_hard_regno_costs (full_costs, aclass, false);
    }
  min_cost = min_full_cost = INT_MAX;
  /* We don't care about giving callee saved registers to allocnos no
     living through calls because call clobbered registers are
     allocated first (it is usual practice to put them first in
     REG_ALLOC_ORDER).  */
  mode = ALLOCNO_MODE (a);
  for (i = 0; i < class_size; i++)
    {
      hard_regno = ira_class_hard_regs[aclass][i];
#ifdef STACK_REGS
      if (no_stack_reg_p
	  && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
	continue;
#endif
      if (! check_hard_reg_p (a, hard_regno,
			      conflicting_regs, profitable_hard_regs))
	continue;
      cost = costs[i];
      full_cost = full_costs[i];
      if (!HONOR_REG_ALLOC_ORDER)
	{
	  if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
	    /* We need to save/restore the hard register in
	       epilogue/prologue.  Therefore we increase the cost.  */
	    {
	      rclass = REGNO_REG_CLASS (hard_regno);
	      add_cost = ((ira_memory_move_cost[mode][rclass][0]
			   + ira_memory_move_cost[mode][rclass][1])
			  * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
	      cost += add_cost;
	      full_cost += add_cost;
	    }
	}
      if (min_cost > cost)
	min_cost = cost;
      /* The winner minimizes the full (conflict-biased) cost.  */
      if (min_full_cost > full_cost)
	{
	  min_full_cost = full_cost;
	  best_hard_regno = hard_regno;
	  ira_assert (hard_regno >= 0);
	}
    }
  if (min_full_cost > mem_cost
      /* Do not spill static chain pointer pseudo when non-local goto
	 is used.  */
      && ! non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a)))
    {
      if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
		 mem_cost, min_full_cost);
      best_hard_regno = -1;
    }
 fail:
  if (best_hard_regno >= 0)
    {
      for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
	allocated_hardreg_p[best_hard_regno + i] = true;
    }
  if (! retry_p)
    restore_costs_from_copies (a);
  ALLOCNO_HARD_REGNO (a) = best_hard_regno;
  ALLOCNO_ASSIGNED_P (a) = true;
  if (best_hard_regno >= 0)
    update_costs_from_copies (a, true, ! retry_p);
  ira_assert (ALLOCNO_CLASS (a) == aclass);
  /* We don't need updated costs anymore.  */
  ira_free_allocno_updated_costs (a);
  return best_hard_regno >= 0;
}
1896 /* An array used to sort copies. */
1897 static ira_copy_t *sorted_copies;
1899 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1900 used to find a conflict for new allocnos or allocnos with the
1901 different allocno classes. */
1902 static bool
1903 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
1905 rtx reg1, reg2;
1906 int i, j;
1907 int n1 = ALLOCNO_NUM_OBJECTS (a1);
1908 int n2 = ALLOCNO_NUM_OBJECTS (a2);
1910 if (a1 == a2)
1911 return false;
1912 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
1913 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
1914 if (reg1 != NULL && reg2 != NULL
1915 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
1916 return false;
1918 for (i = 0; i < n1; i++)
1920 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
1922 for (j = 0; j < n2; j++)
1924 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
1926 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
1927 OBJECT_LIVE_RANGES (c2)))
1928 return true;
1931 return false;
1934 /* The function is used to sort copies according to their execution
1935 frequencies. */
1936 static int
1937 copy_freq_compare_func (const void *v1p, const void *v2p)
1939 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1940 int pri1, pri2;
1942 pri1 = cp1->freq;
1943 pri2 = cp2->freq;
1944 if (pri2 - pri1)
1945 return pri2 - pri1;
1947 /* If frequencies are equal, sort by copies, so that the results of
1948 qsort leave nothing to chance. */
1949 return cp1->num - cp2->num;
/* Return true if any allocno from thread of A1 conflicts with any
   allocno from thread A2.  */
static bool
allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  /* Threads are circular lists linked by next_thread_allocno; starting
     from the successor and stopping when the head reappears visits
     every member exactly once.  */
  for (a = ALLOCNO_COLOR_DATA (a2)->next_thread_allocno;;
       a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
    {
      for (conflict_a = ALLOCNO_COLOR_DATA (a1)->next_thread_allocno;;
	   conflict_a = ALLOCNO_COLOR_DATA (conflict_a)->next_thread_allocno)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
1978 /* Merge two threads given correspondingly by their first allocnos T1
1979 and T2 (more accurately merging T2 into T1). */
1980 static void
1981 merge_threads (ira_allocno_t t1, ira_allocno_t t2)
1983 ira_allocno_t a, next, last;
1985 gcc_assert (t1 != t2
1986 && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
1987 && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
1988 for (last = t2, a = ALLOCNO_COLOR_DATA (t2)->next_thread_allocno;;
1989 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
1991 ALLOCNO_COLOR_DATA (a)->first_thread_allocno = t1;
1992 if (a == t2)
1993 break;
1994 last = a;
1996 next = ALLOCNO_COLOR_DATA (t1)->next_thread_allocno;
1997 ALLOCNO_COLOR_DATA (t1)->next_thread_allocno = t2;
1998 ALLOCNO_COLOR_DATA (last)->next_thread_allocno = next;
1999 ALLOCNO_COLOR_DATA (t1)->thread_freq += ALLOCNO_COLOR_DATA (t2)->thread_freq;
2002 /* Create threads by processing CP_NUM copies from sorted copies. We
2003 process the most expensive copies first. */
2004 static void
2005 form_threads_from_copies (int cp_num)
2007 ira_allocno_t a, thread1, thread2;
2008 ira_copy_t cp;
2009 int i, n;
2011 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
2012 /* Form threads processing copies, most frequently executed
2013 first. */
2014 for (; cp_num != 0;)
2016 for (i = 0; i < cp_num; i++)
2018 cp = sorted_copies[i];
2019 thread1 = ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno;
2020 thread2 = ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno;
2021 if (thread1 == thread2)
2022 continue;
2023 if (! allocno_thread_conflict_p (thread1, thread2))
2025 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2026 fprintf
2027 (ira_dump_file,
2028 " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2029 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
2030 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
2031 cp->freq);
2032 merge_threads (thread1, thread2);
2033 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2035 thread1 = ALLOCNO_COLOR_DATA (thread1)->first_thread_allocno;
2036 fprintf (ira_dump_file, " Result (freq=%d): a%dr%d(%d)",
2037 ALLOCNO_COLOR_DATA (thread1)->thread_freq,
2038 ALLOCNO_NUM (thread1), ALLOCNO_REGNO (thread1),
2039 ALLOCNO_FREQ (thread1));
2040 for (a = ALLOCNO_COLOR_DATA (thread1)->next_thread_allocno;
2041 a != thread1;
2042 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2043 fprintf (ira_dump_file, " a%dr%d(%d)",
2044 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2045 ALLOCNO_FREQ (a));
2046 fprintf (ira_dump_file, "\n");
2048 i++;
2049 break;
2052 /* Collect the rest of copies. */
2053 for (n = 0; i < cp_num; i++)
2055 cp = sorted_copies[i];
2056 if (ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno
2057 != ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno)
2058 sorted_copies[n++] = cp;
2060 cp_num = n;
2064 /* Create threads by processing copies of all alocnos from BUCKET. We
2065 process the most expensive copies first. */
2066 static void
2067 form_threads_from_bucket (ira_allocno_t bucket)
2069 ira_allocno_t a;
2070 ira_copy_t cp, next_cp;
2071 int cp_num = 0;
2073 for (a = bucket; a != NULL; a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2075 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2077 if (cp->first == a)
2079 next_cp = cp->next_first_allocno_copy;
2080 sorted_copies[cp_num++] = cp;
2082 else if (cp->second == a)
2083 next_cp = cp->next_second_allocno_copy;
2084 else
2085 gcc_unreachable ();
2088 form_threads_from_copies (cp_num);
2091 /* Create threads by processing copies of colorable allocno A. We
2092 process most expensive copies first. */
2093 static void
2094 form_threads_from_colorable_allocno (ira_allocno_t a)
2096 ira_allocno_t another_a;
2097 ira_copy_t cp, next_cp;
2098 int cp_num = 0;
2100 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2102 if (cp->first == a)
2104 next_cp = cp->next_first_allocno_copy;
2105 another_a = cp->second;
2107 else if (cp->second == a)
2109 next_cp = cp->next_second_allocno_copy;
2110 another_a = cp->first;
2112 else
2113 gcc_unreachable ();
2114 if ((! ALLOCNO_COLOR_DATA (another_a)->in_graph_p
2115 && !ALLOCNO_COLOR_DATA (another_a)->may_be_spilled_p)
2116 || ALLOCNO_COLOR_DATA (another_a)->colorable_p)
2117 sorted_copies[cp_num++] = cp;
2119 form_threads_from_copies (cp_num);
2122 /* Form initial threads which contain only one allocno. */
2123 static void
2124 init_allocno_threads (void)
2126 ira_allocno_t a;
2127 unsigned int j;
2128 bitmap_iterator bi;
2130 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2132 a = ira_allocnos[j];
2133 /* Set up initial thread data: */
2134 ALLOCNO_COLOR_DATA (a)->first_thread_allocno
2135 = ALLOCNO_COLOR_DATA (a)->next_thread_allocno = a;
2136 ALLOCNO_COLOR_DATA (a)->thread_freq = ALLOCNO_FREQ (a);
2142 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
2144 /* Bucket of allocnos that can colored currently without spilling. */
2145 static ira_allocno_t colorable_allocno_bucket;
2147 /* Bucket of allocnos that might be not colored currently without
2148 spilling. */
2149 static ira_allocno_t uncolorable_allocno_bucket;
2151 /* The current number of allocnos in the uncolorable_bucket. */
2152 static int uncolorable_allocnos_num;
2154 /* Return the current spill priority of allocno A. The less the
2155 number, the more preferable the allocno for spilling. */
2156 static inline int
2157 allocno_spill_priority (ira_allocno_t a)
2159 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
2161 return (data->temp
2162 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2163 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
2164 + 1));
2167 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
2168 before the call. */
2169 static void
2170 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
2172 ira_allocno_t first_a;
2173 allocno_color_data_t data;
2175 if (bucket_ptr == &uncolorable_allocno_bucket
2176 && ALLOCNO_CLASS (a) != NO_REGS)
2178 uncolorable_allocnos_num++;
2179 ira_assert (uncolorable_allocnos_num > 0);
2181 first_a = *bucket_ptr;
2182 data = ALLOCNO_COLOR_DATA (a);
2183 data->next_bucket_allocno = first_a;
2184 data->prev_bucket_allocno = NULL;
2185 if (first_a != NULL)
2186 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
2187 *bucket_ptr = a;
2190 /* Compare two allocnos to define which allocno should be pushed first
2191 into the coloring stack. If the return is a negative number, the
2192 allocno given by the first parameter will be pushed first. In this
2193 case such allocno has less priority than the second one and the
2194 hard register will be assigned to it after assignment to the second
2195 one. As the result of such assignment order, the second allocno
2196 has a better chance to get the best hard register. */
2197 static int
2198 bucket_allocno_compare_func (const void *v1p, const void *v2p)
2200 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2201 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2202 int diff, freq1, freq2, a1_num, a2_num;
2203 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)->first_thread_allocno;
2204 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)->first_thread_allocno;
2205 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
2207 freq1 = ALLOCNO_COLOR_DATA (t1)->thread_freq;
2208 freq2 = ALLOCNO_COLOR_DATA (t2)->thread_freq;
2209 if ((diff = freq1 - freq2) != 0)
2210 return diff;
2212 if ((diff = ALLOCNO_NUM (t2) - ALLOCNO_NUM (t1)) != 0)
2213 return diff;
2215 /* Push pseudos requiring less hard registers first. It means that
2216 we will assign pseudos requiring more hard registers first
2217 avoiding creation small holes in free hard register file into
2218 which the pseudos requiring more hard registers can not fit. */
2219 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
2220 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
2221 return diff;
2223 freq1 = ALLOCNO_FREQ (a1);
2224 freq2 = ALLOCNO_FREQ (a2);
2225 if ((diff = freq1 - freq2) != 0)
2226 return diff;
2228 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
2229 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
2230 if ((diff = a2_num - a1_num) != 0)
2231 return diff;
2232 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2235 /* Sort bucket *BUCKET_PTR and return the result through
2236 BUCKET_PTR. */
2237 static void
2238 sort_bucket (ira_allocno_t *bucket_ptr,
2239 int (*compare_func) (const void *, const void *))
2241 ira_allocno_t a, head;
2242 int n;
2244 for (n = 0, a = *bucket_ptr;
2245 a != NULL;
2246 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2247 sorted_allocnos[n++] = a;
2248 if (n <= 1)
2249 return;
2250 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
2251 head = NULL;
2252 for (n--; n >= 0; n--)
2254 a = sorted_allocnos[n];
2255 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
2256 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
2257 if (head != NULL)
2258 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
2259 head = a;
2261 *bucket_ptr = head;
2264 /* Add ALLOCNO to colorable bucket maintaining the order according
2265 their priority. ALLOCNO should be not in a bucket before the
2266 call. */
2267 static void
2268 add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
2270 ira_allocno_t before, after;
2272 form_threads_from_colorable_allocno (allocno);
2273 for (before = colorable_allocno_bucket, after = NULL;
2274 before != NULL;
2275 after = before,
2276 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
2277 if (bucket_allocno_compare_func (&allocno, &before) < 0)
2278 break;
2279 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
2280 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
2281 if (after == NULL)
2282 colorable_allocno_bucket = allocno;
2283 else
2284 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
2285 if (before != NULL)
2286 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
2289 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
2290 the call. */
2291 static void
2292 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2294 ira_allocno_t prev_allocno, next_allocno;
2296 if (bucket_ptr == &uncolorable_allocno_bucket
2297 && ALLOCNO_CLASS (allocno) != NO_REGS)
2299 uncolorable_allocnos_num--;
2300 ira_assert (uncolorable_allocnos_num >= 0);
2302 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
2303 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
2304 if (prev_allocno != NULL)
2305 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
2306 else
2308 ira_assert (*bucket_ptr == allocno);
2309 *bucket_ptr = next_allocno;
2311 if (next_allocno != NULL)
2312 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
2315 /* Put allocno A onto the coloring stack without removing it from its
2316 bucket. Pushing allocno to the coloring stack can result in moving
2317 conflicting allocnos from the uncolorable bucket to the colorable
2318 one. */
2319 static void
2320 push_allocno_to_stack (ira_allocno_t a)
2322 enum reg_class aclass;
2323 allocno_color_data_t data, conflict_data;
2324 int size, i, n = ALLOCNO_NUM_OBJECTS (a);
2326 data = ALLOCNO_COLOR_DATA (a);
2327 data->in_graph_p = false;
2328 allocno_stack_vec.safe_push (a);
2329 aclass = ALLOCNO_CLASS (a);
2330 if (aclass == NO_REGS)
2331 return;
2332 size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2333 if (n > 1)
2335 /* We will deal with the subwords individually. */
2336 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
2337 size = 1;
2339 for (i = 0; i < n; i++)
2341 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2342 ira_object_t conflict_obj;
2343 ira_object_conflict_iterator oci;
2345 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2347 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2349 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
2350 if (conflict_data->colorable_p
2351 || ! conflict_data->in_graph_p
2352 || ALLOCNO_ASSIGNED_P (conflict_a)
2353 || !(hard_reg_set_intersect_p
2354 (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
2355 conflict_data->profitable_hard_regs)))
2356 continue;
2357 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
2358 ALLOCNO_NUM (conflict_a)));
2359 if (update_left_conflict_sizes_p (conflict_a, a, size))
2361 delete_allocno_from_bucket
2362 (conflict_a, &uncolorable_allocno_bucket);
2363 add_allocno_to_ordered_colorable_bucket (conflict_a);
2364 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2366 fprintf (ira_dump_file, " Making");
2367 ira_print_expanded_allocno (conflict_a);
2368 fprintf (ira_dump_file, " colorable\n");
2376 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2377 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
2378 static void
2379 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2381 if (colorable_p)
2382 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2383 else
2384 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2385 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2387 fprintf (ira_dump_file, " Pushing");
2388 ira_print_expanded_allocno (allocno);
2389 if (colorable_p)
2390 fprintf (ira_dump_file, "(cost %d)\n",
2391 ALLOCNO_COLOR_DATA (allocno)->temp);
2392 else
2393 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2394 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
2395 allocno_spill_priority (allocno),
2396 ALLOCNO_COLOR_DATA (allocno)->temp);
2398 if (! colorable_p)
2399 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
2400 push_allocno_to_stack (allocno);
2403 /* Put all allocnos from colorable bucket onto the coloring stack. */
2404 static void
2405 push_only_colorable (void)
2407 form_threads_from_bucket (colorable_allocno_bucket);
2408 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
2409 for (;colorable_allocno_bucket != NULL;)
2410 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
2413 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
2414 loop given by its LOOP_NODE. */
2416 ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
2418 int freq, i;
2419 edge_iterator ei;
2420 edge e;
2421 vec<edge> edges;
2423 ira_assert (current_loops != NULL && loop_node->loop != NULL
2424 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
2425 freq = 0;
2426 if (! exit_p)
2428 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2429 if (e->src != loop_node->loop->latch
2430 && (regno < 0
2431 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2432 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
2433 freq += EDGE_FREQUENCY (e);
2435 else
2437 edges = get_loop_exit_edges (loop_node->loop);
2438 FOR_EACH_VEC_ELT (edges, i, e)
2439 if (regno < 0
2440 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2441 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
2442 freq += EDGE_FREQUENCY (e);
2443 edges.release ();
2446 return REG_FREQ_FROM_EDGE_FREQ (freq);
2449 /* Calculate and return the cost of putting allocno A into memory. */
2450 static int
2451 calculate_allocno_spill_cost (ira_allocno_t a)
2453 int regno, cost;
2454 machine_mode mode;
2455 enum reg_class rclass;
2456 ira_allocno_t parent_allocno;
2457 ira_loop_tree_node_t parent_node, loop_node;
2459 regno = ALLOCNO_REGNO (a);
2460 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
2461 if (ALLOCNO_CAP (a) != NULL)
2462 return cost;
2463 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2464 if ((parent_node = loop_node->parent) == NULL)
2465 return cost;
2466 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
2467 return cost;
2468 mode = ALLOCNO_MODE (a);
2469 rclass = ALLOCNO_CLASS (a);
2470 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
2471 cost -= (ira_memory_move_cost[mode][rclass][0]
2472 * ira_loop_edge_freq (loop_node, regno, true)
2473 + ira_memory_move_cost[mode][rclass][1]
2474 * ira_loop_edge_freq (loop_node, regno, false));
2475 else
2477 ira_init_register_move_cost_if_necessary (mode);
2478 cost += ((ira_memory_move_cost[mode][rclass][1]
2479 * ira_loop_edge_freq (loop_node, regno, true)
2480 + ira_memory_move_cost[mode][rclass][0]
2481 * ira_loop_edge_freq (loop_node, regno, false))
2482 - (ira_register_move_cost[mode][rclass][rclass]
2483 * (ira_loop_edge_freq (loop_node, regno, false)
2484 + ira_loop_edge_freq (loop_node, regno, true))));
2486 return cost;
2489 /* Used for sorting allocnos for spilling. */
2490 static inline int
2491 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2493 int pri1, pri2, diff;
2495 /* Avoid spilling static chain pointer pseudo when non-local goto is
2496 used. */
2497 if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)))
2498 return 1;
2499 else if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)))
2500 return -1;
2501 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2502 return 1;
2503 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2504 return -1;
2505 pri1 = allocno_spill_priority (a1);
2506 pri2 = allocno_spill_priority (a2);
2507 if ((diff = pri1 - pri2) != 0)
2508 return diff;
2509 if ((diff
2510 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2511 return diff;
2512 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2515 /* Used for sorting allocnos for spilling. */
2516 static int
2517 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2519 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2520 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2522 return allocno_spill_priority_compare (p1, p2);
2525 /* Push allocnos to the coloring stack. The order of allocnos in the
2526 stack defines the order for the subsequent coloring. */
2527 static void
2528 push_allocnos_to_stack (void)
2530 ira_allocno_t a;
2531 int cost;
2533 /* Calculate uncolorable allocno spill costs. */
2534 for (a = uncolorable_allocno_bucket;
2535 a != NULL;
2536 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2537 if (ALLOCNO_CLASS (a) != NO_REGS)
2539 cost = calculate_allocno_spill_cost (a);
2540 /* ??? Remove cost of copies between the coalesced
2541 allocnos. */
2542 ALLOCNO_COLOR_DATA (a)->temp = cost;
2544 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
2545 for (;;)
2547 push_only_colorable ();
2548 a = uncolorable_allocno_bucket;
2549 if (a == NULL)
2550 break;
2551 remove_allocno_from_bucket_and_push (a, false);
2553 ira_assert (colorable_allocno_bucket == NULL
2554 && uncolorable_allocno_bucket == NULL);
2555 ira_assert (uncolorable_allocnos_num == 0);
2558 /* Pop the coloring stack and assign hard registers to the popped
2559 allocnos. */
2560 static void
2561 pop_allocnos_from_stack (void)
2563 ira_allocno_t allocno;
2564 enum reg_class aclass;
2566 for (;allocno_stack_vec.length () != 0;)
2568 allocno = allocno_stack_vec.pop ();
2569 aclass = ALLOCNO_CLASS (allocno);
2570 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2572 fprintf (ira_dump_file, " Popping");
2573 ira_print_expanded_allocno (allocno);
2574 fprintf (ira_dump_file, " -- ");
2576 if (aclass == NO_REGS)
2578 ALLOCNO_HARD_REGNO (allocno) = -1;
2579 ALLOCNO_ASSIGNED_P (allocno) = true;
2580 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2581 ira_assert
2582 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2583 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2584 fprintf (ira_dump_file, "assign memory\n");
2586 else if (assign_hard_reg (allocno, false))
2588 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2589 fprintf (ira_dump_file, "assign reg %d\n",
2590 ALLOCNO_HARD_REGNO (allocno));
2592 else if (ALLOCNO_ASSIGNED_P (allocno))
2594 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2595 fprintf (ira_dump_file, "spill%s\n",
2596 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p
2597 ? "" : "!");
2599 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2603 /* Set up number of available hard registers for allocno A. */
2604 static void
2605 setup_allocno_available_regs_num (ira_allocno_t a)
2607 int i, n, hard_regno, hard_regs_num, nwords;
2608 enum reg_class aclass;
2609 allocno_color_data_t data;
2611 aclass = ALLOCNO_CLASS (a);
2612 data = ALLOCNO_COLOR_DATA (a);
2613 data->available_regs_num = 0;
2614 if (aclass == NO_REGS)
2615 return;
2616 hard_regs_num = ira_class_hard_regs_num[aclass];
2617 nwords = ALLOCNO_NUM_OBJECTS (a);
2618 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
2620 hard_regno = ira_class_hard_regs[aclass][i];
2621 /* Checking only profitable hard regs. */
2622 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
2623 n++;
2625 data->available_regs_num = n;
2626 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
2627 return;
2628 fprintf
2629 (ira_dump_file,
2630 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2631 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2632 reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
2633 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2634 fprintf (ira_dump_file, ", %snode: ",
2635 hard_reg_set_equal_p (data->profitable_hard_regs,
2636 data->hard_regs_node->hard_regs->set)
2637 ? "" : "^");
2638 print_hard_reg_set (ira_dump_file,
2639 data->hard_regs_node->hard_regs->set, false);
2640 for (i = 0; i < nwords; i++)
2642 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2644 if (nwords != 1)
2646 if (i != 0)
2647 fprintf (ira_dump_file, ", ");
2648 fprintf (ira_dump_file, " obj %d", i);
2650 fprintf (ira_dump_file, " (confl regs = ");
2651 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2652 false);
2653 fprintf (ira_dump_file, ")");
2655 fprintf (ira_dump_file, "\n");
2658 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2659 conflicting allocnos and hard registers. */
2660 static void
2661 put_allocno_into_bucket (ira_allocno_t allocno)
2663 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2664 setup_allocno_available_regs_num (allocno);
2665 if (setup_left_conflict_sizes_p (allocno))
2666 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2667 else
2668 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
/* Map: allocno number -> allocno priority.  */
static int *allocno_priorities;
2674 /* Set up priorities for N allocnos in array
2675 CONSIDERATION_ALLOCNOS. */
2676 static void
2677 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
2679 int i, length, nrefs, priority, max_priority, mult;
2680 ira_allocno_t a;
2682 max_priority = 0;
2683 for (i = 0; i < n; i++)
2685 a = consideration_allocnos[i];
2686 nrefs = ALLOCNO_NREFS (a);
2687 ira_assert (nrefs >= 0);
2688 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2689 ira_assert (mult >= 0);
2690 allocno_priorities[ALLOCNO_NUM (a)]
2691 = priority
2692 = (mult
2693 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
2694 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
2695 if (priority < 0)
2696 priority = -priority;
2697 if (max_priority < priority)
2698 max_priority = priority;
2700 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2701 for (i = 0; i < n; i++)
2703 a = consideration_allocnos[i];
2704 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2705 if (ALLOCNO_NUM_OBJECTS (a) > 1)
2706 length /= ALLOCNO_NUM_OBJECTS (a);
2707 if (length <= 0)
2708 length = 1;
2709 allocno_priorities[ALLOCNO_NUM (a)]
2710 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2714 /* Sort allocnos according to the profit of usage of a hard register
2715 instead of memory for them. */
2716 static int
2717 allocno_cost_compare_func (const void *v1p, const void *v2p)
2719 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2720 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2721 int c1, c2;
2723 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2724 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2725 if (c1 - c2)
2726 return c1 - c2;
2728 /* If regs are equally good, sort by allocno numbers, so that the
2729 results of qsort leave nothing to chance. */
2730 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2733 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2734 possible to hard registers. Let us try to improve allocation with
2735 cost point of view. This function improves the allocation by
2736 spilling some allocnos and assigning the freed hard registers to
2737 other allocnos if it decreases the overall allocation cost. */
2738 static void
2739 improve_allocation (void)
2741 unsigned int i;
2742 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
2743 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
2744 bool try_p;
2745 enum reg_class aclass;
2746 machine_mode mode;
2747 int *allocno_costs;
2748 int costs[FIRST_PSEUDO_REGISTER];
2749 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
2750 ira_allocno_t a;
2751 bitmap_iterator bi;
2753 /* Don't bother to optimize the code with static chain pointer and
2754 non-local goto in order not to spill the chain pointer
2755 pseudo. */
2756 if (cfun->static_chain_decl && crtl->has_nonlocal_goto)
2757 return;
2758 /* Clear counts used to process conflicting allocnos only once for
2759 each allocno. */
2760 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2761 ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
2762 check = n = 0;
2763 /* Process each allocno and try to assign a hard register to it by
2764 spilling some its conflicting allocnos. */
2765 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2767 a = ira_allocnos[i];
2768 ALLOCNO_COLOR_DATA (a)->temp = 0;
2769 if (empty_profitable_hard_regs (a))
2770 continue;
2771 check++;
2772 aclass = ALLOCNO_CLASS (a);
2773 allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
2774 if (allocno_costs == NULL)
2775 allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
2776 if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
2777 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
2778 else if (allocno_costs == NULL)
2779 /* It means that assigning a hard register is not profitable
2780 (we don't waste memory for hard register costs in this
2781 case). */
2782 continue;
2783 else
2784 base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
2785 try_p = false;
2786 get_conflict_and_start_profitable_regs (a, false,
2787 conflicting_regs,
2788 &profitable_hard_regs);
2789 class_size = ira_class_hard_regs_num[aclass];
2790 /* Set up cost improvement for usage of each profitable hard
2791 register for allocno A. */
2792 for (j = 0; j < class_size; j++)
2794 hregno = ira_class_hard_regs[aclass][j];
2795 if (! check_hard_reg_p (a, hregno,
2796 conflicting_regs, profitable_hard_regs))
2797 continue;
2798 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
2799 k = allocno_costs == NULL ? 0 : j;
2800 costs[hregno] = (allocno_costs == NULL
2801 ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
2802 costs[hregno] -= base_cost;
2803 if (costs[hregno] < 0)
2804 try_p = true;
2806 if (! try_p)
2807 /* There is no chance to improve the allocation cost by
2808 assigning hard register to allocno A even without spilling
2809 conflicting allocnos. */
2810 continue;
2811 mode = ALLOCNO_MODE (a);
2812 nwords = ALLOCNO_NUM_OBJECTS (a);
2813 /* Process each allocno conflicting with A and update the cost
2814 improvement for profitable hard registers of A. To use a
2815 hard register for A we need to spill some conflicting
2816 allocnos and that creates penalty for the cost
2817 improvement. */
2818 for (word = 0; word < nwords; word++)
2820 ira_object_t conflict_obj;
2821 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2822 ira_object_conflict_iterator oci;
2824 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2826 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2828 if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
2829 /* We already processed this conflicting allocno
2830 because we processed earlier another object of the
2831 conflicting allocno. */
2832 continue;
2833 ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
2834 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2835 continue;
2836 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
2837 k = (ira_class_hard_reg_index
2838 [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
2839 ira_assert (k >= 0);
2840 if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
2841 != NULL)
2842 spill_cost -= allocno_costs[k];
2843 else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
2844 != NULL)
2845 spill_cost -= allocno_costs[k];
2846 else
2847 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
2848 conflict_nregs
2849 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2850 for (r = conflict_hregno;
2851 r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
2852 r--)
2853 if (check_hard_reg_p (a, r,
2854 conflicting_regs, profitable_hard_regs))
2855 costs[r] += spill_cost;
2856 for (r = conflict_hregno + 1;
2857 r < conflict_hregno + conflict_nregs;
2858 r++)
2859 if (check_hard_reg_p (a, r,
2860 conflicting_regs, profitable_hard_regs))
2861 costs[r] += spill_cost;
2864 min_cost = INT_MAX;
2865 best = -1;
2866 /* Now we choose hard register for A which results in highest
2867 allocation cost improvement. */
2868 for (j = 0; j < class_size; j++)
2870 hregno = ira_class_hard_regs[aclass][j];
2871 if (check_hard_reg_p (a, hregno,
2872 conflicting_regs, profitable_hard_regs)
2873 && min_cost > costs[hregno])
2875 best = hregno;
2876 min_cost = costs[hregno];
2879 if (min_cost >= 0)
2880 /* We are in a situation when assigning any hard register to A
2881 by spilling some conflicting allocnos does not improve the
2882 allocation cost. */
2883 continue;
2884 nregs = hard_regno_nregs[best][mode];
2885 /* Now spill conflicting allocnos which contain a hard register
2886 of A when we assign the best chosen hard register to it. */
2887 for (word = 0; word < nwords; word++)
2889 ira_object_t conflict_obj;
2890 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2891 ira_object_conflict_iterator oci;
2893 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2895 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2897 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2898 continue;
2899 conflict_nregs
2900 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2901 if (best + nregs <= conflict_hregno
2902 || conflict_hregno + conflict_nregs <= best)
2903 /* No intersection. */
2904 continue;
2905 ALLOCNO_HARD_REGNO (conflict_a) = -1;
2906 sorted_allocnos[n++] = conflict_a;
2907 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2908 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
2909 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
2910 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2913 /* Assign the best chosen hard register to A. */
2914 ALLOCNO_HARD_REGNO (a) = best;
2915 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2916 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
2917 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2919 if (n == 0)
2920 return;
2921 /* We spilled some allocnos to assign their hard registers to other
2922 allocnos. The spilled allocnos are now in array
2923 'sorted_allocnos'. There is still a possibility that some of the
2924 spilled allocnos can get hard registers. So let us try assign
2925 them hard registers again (just a reminder -- function
2926 'assign_hard_reg' assigns hard registers only if it is possible
2927 and profitable). We process the spilled allocnos with biggest
2928 benefit to get hard register first -- see function
2929 'allocno_cost_compare_func'. */
2930 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2931 allocno_cost_compare_func);
2932 for (j = 0; j < n; j++)
2934 a = sorted_allocnos[j];
2935 ALLOCNO_ASSIGNED_P (a) = false;
2936 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2938 fprintf (ira_dump_file, " ");
2939 ira_print_expanded_allocno (a);
2940 fprintf (ira_dump_file, " -- ");
2942 if (assign_hard_reg (a, false))
2944 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2945 fprintf (ira_dump_file, "assign hard reg %d\n",
2946 ALLOCNO_HARD_REGNO (a));
2948 else
2950 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2951 fprintf (ira_dump_file, "assign memory\n");
2956 /* Sort allocnos according to their priorities. */
2957 static int
2958 allocno_priority_compare_func (const void *v1p, const void *v2p)
2960 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2961 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2962 int pri1, pri2;
2964 /* Assign hard reg to static chain pointer pseudo first when
2965 non-local goto is used. */
2966 if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a1)))
2967 return 1;
2968 else if (non_spilled_static_chain_regno_p (ALLOCNO_REGNO (a2)))
2969 return -1;
2970 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2971 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2972 if (pri2 != pri1)
2973 return SORTGT (pri2, pri1);
2975 /* If regs are equally good, sort by allocnos, so that the results of
2976 qsort leave nothing to chance. */
2977 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
   taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP.  */
static void
color_allocnos (void)
{
  unsigned int i, n;
  bitmap_iterator bi;
  ira_allocno_t a;

  setup_profitable_hard_regs ();
  /* For every allocno to color: gather its conflict hard registers and
     drop preferences for hard registers outside its profitable set.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int l, nr;
      HARD_REG_SET conflict_hard_regs;
      allocno_color_data_t data;
      ira_pref_t pref, next_pref;

      a = ira_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      /* Union the conflict hard register sets of all objects of A.  */
      for (l = 0; l < nr; l++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, l);
	  IOR_HARD_REG_SET (conflict_hard_regs,
			    OBJECT_CONFLICT_HARD_REGS (obj));
	}
      data = ALLOCNO_COLOR_DATA (a);
      /* NEXT_PREF is fetched before the test because ira_remove_pref
	 frees PREF.  */
      for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
	{
	  next_pref = pref->next_pref;
	  if (! ira_hard_reg_in_set_p (pref->hard_regno,
				       ALLOCNO_MODE (a),
				       data->profitable_hard_regs))
	    ira_remove_pref (pref);
	}
    }
  if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
    {
      /* Priority coloring: NO_REGS allocnos are spilled immediately;
	 the rest are assigned in priority order.  */
      n = 0;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) == NO_REGS)
	    {
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	      continue;
	    }
	  sorted_allocnos[n++] = a;
	}
      if (n != 0)
	{
	  setup_allocno_priorities (sorted_allocnos, n);
	  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
		 allocno_priority_compare_func);
	  for (i = 0; i < n; i++)
	    {
	      a = sorted_allocnos[i];
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " ");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, " -- ");
		}
	      if (assign_hard_reg (a, false))
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign hard reg %d\n",
			     ALLOCNO_HARD_REGNO (a));
		}
	      else
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign memory\n");
		}
	    }
	}
    }
  else
    {
      /* Chaitin-Briggs coloring using the hard register forest.  */
      form_allocno_hard_regs_nodes_forest ();
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	print_hard_regs_forest (ira_dump_file);
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
	    {
	      ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
	      update_costs_from_prefs (a);
	    }
	  else
	    {
	      /* No profitable hard registers at all: spill now.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      /* We don't need updated costs anymore.  */
	      ira_free_allocno_updated_costs (a);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	    }
	}
      /* Put the allocnos into the corresponding buckets.  */
      colorable_allocno_bucket = NULL;
      uncolorable_allocno_bucket = NULL;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
	    put_allocno_into_bucket (a);
	}
      push_allocnos_to_stack ();
      pop_allocnos_from_stack ();
      finish_allocno_hard_regs_nodes_forest ();
    }
  improve_allocation ();
}
/* Output information about the loop given by its LOOP_TREE_NODE.  */
static void
print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
  unsigned int j;
  bitmap_iterator bi;
  ira_loop_tree_node_t subloop_node, dest_loop_node;
  edge e;
  edge_iterator ei;

  /* The root of the loop tree (the whole function) has no parent.  */
  if (loop_tree_node->parent == NULL)
    fprintf (ira_dump_file,
	     "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
	     NUM_FIXED_BLOCKS);
  else
    {
      ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
      fprintf (ira_dump_file,
	       "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
	       loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
	       loop_tree_node->loop->header->index,
	       loop_depth (loop_tree_node->loop));
    }
  /* List the basic blocks of the region, annotating edges that leave
     the region with the destination loop number.  */
  for (subloop_node = loop_tree_node->children;
       subloop_node != NULL;
       subloop_node = subloop_node->next)
    if (subloop_node->bb != NULL)
      {
	fprintf (ira_dump_file, " %d", subloop_node->bb->index);
	FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
	  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	      && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
		  != loop_tree_node))
	    fprintf (ira_dump_file, "(->%d:l%d)",
		     e->dest->index, dest_loop_node->loop_num);
      }
  fprintf (ira_dump_file, "\n all:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n modified regnos:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
    fprintf (ira_dump_file, " %d", j);
  fprintf (ira_dump_file, "\n border:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n Pressure:");
  /* Report register pressure per pressure class, skipping classes
     with zero pressure.  */
  for (j = 0; (int) j < ira_pressure_classes_num; j++)
    {
      enum reg_class pclass;

      pclass = ira_pressure_classes[j];
      if (loop_tree_node->reg_pressure[pclass] == 0)
	continue;
      fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
	       loop_tree_node->reg_pressure[pclass]);
    }
  fprintf (ira_dump_file, "\n");
}
/* Color the allocnos inside loop (in the extreme case it can be all
   of the function) given the corresponding LOOP_TREE_NODE.  The
   function is called for each loop during top-down traverse of the
   loop tree.  */
static void
color_pass (ira_loop_tree_node_t loop_tree_node)
{
  int regno, hard_regno, index = -1, n;
  int cost, exit_freq, enter_freq;
  unsigned int j;
  bitmap_iterator bi;
  machine_mode mode;
  enum reg_class rclass, aclass, pclass;
  ira_allocno_t a, subloop_allocno;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_tree_node->bb == NULL);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    print_loop_title (loop_tree_node);

  bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
  bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
  n = 0;
  /* Count the considered allocnos; allocnos already assigned are only
     considered, not colored again.  */
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      n++;
      if (! ALLOCNO_ASSIGNED_P (a))
	continue;
      bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
    }
  allocno_color_data
    = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
					   * n);
  memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
  curr_allocno_process = 0;
  n = 0;
  /* Attach a zeroed per-allocno color data slot to each considered
     allocno.  */
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
      n++;
    }
  init_allocno_threads ();
  /* Color all mentioned allocnos including transparent ones.  */
  color_allocnos ();
  /* Process caps.  They are processed just once.  */
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
      {
	a = ira_allocnos[j];
	if (ALLOCNO_CAP_MEMBER (a) == NULL)
	  continue;
	/* Remove from processing in the next loop.  */
	bitmap_clear_bit (consideration_allocno_bitmap, j);
	rclass = ALLOCNO_CLASS (a);
	pclass = ira_pressure_class_translate[rclass];
	if (flag_ira_region == IRA_REGION_MIXED
	    && (loop_tree_node->reg_pressure[pclass]
		<= ira_class_hard_regs_num[pclass]))
	  {
	    mode = ALLOCNO_MODE (a);
	    hard_regno = ALLOCNO_HARD_REGNO (a);
	    if (hard_regno >= 0)
	      {
		index = ira_class_hard_reg_index[rclass][hard_regno];
		ira_assert (index >= 0);
	      }
	    regno = ALLOCNO_REGNO (a);
	    /* Propagate the cap's assignment to the member allocno.  */
	    subloop_allocno = ALLOCNO_CAP_MEMBER (a);
	    subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
	    ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
	    ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
	    ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
	    if (hard_regno >= 0)
	      update_costs_from_copies (subloop_allocno, true, true);
	    /* We don't need updated costs anymore.  */
	    ira_free_allocno_updated_costs (subloop_allocno);
	  }
      }
  /* Update costs of the corresponding allocnos (not caps) in the
     subloops.  */
  for (subloop_node = loop_tree_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
	{
	  a = ira_allocnos[j];
	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  pclass = ira_pressure_class_translate[rclass];
	  hard_regno = ALLOCNO_HARD_REGNO (a);
	  /* Use hard register class here.  ??? */
	  if (hard_regno >= 0)
	    {
	      index = ira_class_hard_reg_index[rclass][hard_regno];
	      ira_assert (index >= 0);
	    }
	  regno = ALLOCNO_REGNO (a);
	  /* ??? conflict costs */
	  subloop_allocno = subloop_node->regno_allocno_map[regno];
	  if (subloop_allocno == NULL
	      || ALLOCNO_CAP (subloop_allocno) != NULL)
	    continue;
	  ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
	  ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
				    ALLOCNO_NUM (subloop_allocno)));
	  if ((flag_ira_region == IRA_REGION_MIXED
	       && (loop_tree_node->reg_pressure[pclass]
		   <= ira_class_hard_regs_num[pclass]))
	      || (pic_offset_table_rtx != NULL
		  && regno == (int) REGNO (pic_offset_table_rtx))
	      /* Avoid overlapped multi-registers. Moves between them
		 might result in wrong code generation.  */
	      || (hard_regno >= 0
		  && ira_reg_class_max_nregs[pclass][mode] > 1))
	    {
	      /* Propagate the outer assignment directly into the
		 subloop.  */
	      if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
		{
		  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
		  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
		  if (hard_regno >= 0)
		    update_costs_from_copies (subloop_allocno, true, true);
		  /* We don't need updated costs anymore.  */
		  ira_free_allocno_updated_costs (subloop_allocno);
		}
	      continue;
	    }
	  exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	  enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	  ira_assert (regno < ira_reg_equiv_len);
	  if (ira_equiv_no_lvalue_p (regno))
	    {
	      if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
		{
		  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
		  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
		  if (hard_regno >= 0)
		    update_costs_from_copies (subloop_allocno, true, true);
		  /* We don't need updated costs anymore.  */
		  ira_free_allocno_updated_costs (subloop_allocno);
		}
	    }
	  else if (hard_regno < 0)
	    {
	      /* A got memory in the outer region: adjust the subloop
		 allocno's memory cost by the moves on the loop
		 border.  */
	      ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
		-= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
		    + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
	    }
	  else
	    {
	      /* A got a hard register: bias the subloop allocno's
		 costs toward the same hard register.  */
	      aclass = ALLOCNO_CLASS (subloop_allocno);
	      ira_init_register_move_cost_if_necessary (mode);
	      cost = (ira_register_move_cost[mode][rclass][rclass]
		      * (exit_freq + enter_freq));
	      ira_allocate_and_set_or_copy_costs
		(&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
		 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
		 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
	      ira_allocate_and_set_or_copy_costs
		(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
		 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
	      ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
	      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
		-= cost;
	      if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
		  > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
		ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
		  = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
	      ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
		+= (ira_memory_move_cost[mode][rclass][0] * enter_freq
		    + ira_memory_move_cost[mode][rclass][1] * exit_freq);
	    }
	}
    }
  /* Detach and free the per-allocno color data.  */
  ira_free (allocno_color_data);
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = NULL;
    }
}
3357 /* Initialize the common data for coloring and calls functions to do
3358 Chaitin-Briggs and regional coloring. */
3359 static void
3360 do_coloring (void)
3362 coloring_allocno_bitmap = ira_allocate_bitmap ();
3363 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3364 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
3366 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
3368 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3369 ira_print_disposition (ira_dump_file);
3371 ira_free_bitmap (coloring_allocno_bitmap);
/* Move spill/restore code, which are to be generated in ira-emit.c,
   to less frequent points (if it is profitable) by reassigning some
   allocnos (in loop with subloops containing in another loop) to
   memory which results in longer live-range where the corresponding
   pseudo-registers will be in memory.  */
static void
move_spill_restore (void)
{
  int cost, regno, hard_regno, hard_regno2, index;
  bool changed_p;
  int enter_freq, exit_freq;
  machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t a, parent_allocno, subloop_allocno;
  ira_loop_tree_node_t parent, loop_node, subloop_node;
  ira_allocno_iterator ai;

  /* Iterate to a fixed point: spilling one allocno can make spilling
     another profitable.  */
  for (;;)
    {
      changed_p = false;
      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "New iteration of spill/restore move\n");
      FOR_EACH_ALLOCNO (a, ai)
	{
	  regno = ALLOCNO_REGNO (a);
	  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (ALLOCNO_CAP_MEMBER (a) != NULL
	      || ALLOCNO_CAP (a) != NULL
	      || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
	      || loop_node->children == NULL
	      /* don't do the optimization because it can create
		 copies and the reload pass can spill the allocno set
		 by copy although the allocno will not get memory
		 slot.  */
	      || ira_equiv_no_lvalue_p (regno)
	      || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a))
	      /* Do not spill static chain pointer pseudo when
		 non-local goto is used.  */
	      || non_spilled_static_chain_regno_p (regno))
	    continue;
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  index = ira_class_hard_reg_index[rclass][hard_regno];
	  ira_assert (index >= 0);
	  /* COST accumulates the benefit of keeping A in a hard
	     register; if it ends up negative, spilling A is
	     profitable.  */
	  cost = (ALLOCNO_MEMORY_COST (a)
		  - (ALLOCNO_HARD_REG_COSTS (a) == NULL
		     ? ALLOCNO_CLASS_COST (a)
		     : ALLOCNO_HARD_REG_COSTS (a)[index]));
	  ira_init_register_move_cost_if_necessary (mode);
	  for (subloop_node = loop_node->subloops;
	       subloop_node != NULL;
	       subloop_node = subloop_node->subloop_next)
	    {
	      ira_assert (subloop_node->bb == NULL);
	      subloop_allocno = subloop_node->regno_allocno_map[regno];
	      if (subloop_allocno == NULL)
		continue;
	      ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
	      /* We have accumulated cost.  To get the real cost of
		 allocno usage in the loop we should subtract costs of
		 the subloop allocnos.  */
	      cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
		       - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
			  ? ALLOCNO_CLASS_COST (subloop_allocno)
			  : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
	      exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  cost
		    += (ira_memory_move_cost[mode][rclass][0] * exit_freq
			+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  /* Account for moves on the border with the parent region.  */
	  if ((parent = loop_node->parent) != NULL
	      && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
	    {
	      ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
	      exit_freq = ira_loop_edge_freq (loop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (loop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  cost
		    += (ira_memory_move_cost[mode][rclass][1] * exit_freq
			+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  if (cost < 0)
	    {
	      /* Memory is cheaper overall: spill A.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf
		    (ira_dump_file,
		     " Moving spill/restore for a%dr%d up from loop %d",
		     ALLOCNO_NUM (a), regno, loop_node->loop_num);
		  fprintf (ira_dump_file, " - profit %d\n", -cost);
		}
	      changed_p = true;
	    }
	}
      if (! changed_p)
	break;
    }
}
/* Update current hard reg costs and current conflict hard reg costs
   for allocno A.  It is done by processing its copies containing
   other allocnos already assigned.  */
static void
update_curr_costs (ira_allocno_t a)
{
  int i, hard_regno, cost;
  machine_mode mode;
  enum reg_class aclass, rclass;
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  ira_free_allocno_updated_costs (a);
  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  mode = ALLOCNO_MODE (a);
  ira_init_register_move_cost_if_necessary (mode);
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      /* A copy links two allocnos; pick the one that is not A.  */
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      /* Only copies whose partner already has a hard register in an
	 intersecting class can bias A's costs.  */
      if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
	  || ! ALLOCNO_ASSIGNED_P (another_a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
	continue;
      rclass = REGNO_REG_CLASS (hard_regno);
      i = ira_class_hard_reg_index[aclass][hard_regno];
      if (i < 0)
	continue;
      /* The copy direction selects which move-cost table entry
	 applies.  */
      cost = (cp->first == a
	      ? ira_register_move_cost[mode][rclass][aclass]
	      : ira_register_move_cost[mode][aclass][rclass]);
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
	 ALLOCNO_HARD_REG_COSTS (a));
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
	 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
      /* Make HARD_REGNO cheaper for A, weighted by the copy
	 frequency.  */
      ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
    }
}
/* Try to assign hard registers to the unassigned allocnos and
   allocnos conflicting with them or conflicting with allocnos whose
   regno >= START_REGNO.  The function is called after ira_flattening,
   so more allocnos (including ones created in ira-emit.c) will have a
   chance to get a hard register.  We use simple assignment algorithm
   based on priorities.  */
void
ira_reassign_conflict_allocnos (int start_regno)
{
  int i, allocnos_to_color_num;
  ira_allocno_t a;
  enum reg_class aclass;
  bitmap allocnos_to_color;
  ira_allocno_iterator ai;

  allocnos_to_color = ira_allocate_bitmap ();
  allocnos_to_color_num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      if (! ALLOCNO_ASSIGNED_P (a)
	  && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
	{
	  if (ALLOCNO_CLASS (a) != NO_REGS)
	    sorted_allocnos[allocnos_to_color_num++] = a;
	  else
	    {
	      /* NO_REGS allocnos can only go to memory.  */
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	    }
	  bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
	}
      if (ALLOCNO_REGNO (a) < start_regno
	  || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      /* Also collect every allocno conflicting with A (regno >=
	 START_REGNO), deduplicated through the bitmap.  */
      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      ira_assert (ira_reg_classes_intersect_p
			  [aclass][ALLOCNO_CLASS (conflict_a)]);
	      /* bitmap_set_bit returns false if the bit was already
		 set, i.e. the allocno was collected before.  */
	      if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
		continue;
	      sorted_allocnos[allocnos_to_color_num++] = conflict_a;
	    }
	}
    }
  ira_free_bitmap (allocnos_to_color);
  if (allocnos_to_color_num > 1)
    {
      setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
      qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
	     allocno_priority_compare_func);
    }
  /* Unassign the collected allocnos and refresh their costs before
     trying to color them again.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      ALLOCNO_ASSIGNED_P (a) = false;
      update_curr_costs (a);
    }
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      if (assign_hard_reg (a, true))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file,
	       " Secondary allocation: assign hard reg %d to reg %d\n",
	       ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
	}
    }
}
/* This page contains functions used to find conflicts using allocno
   live ranges.  */
#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
   intersect.  This should be used when there is only one region.
   Currently this is used during reload.  */
static bool
conflict_by_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
	      && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Reg info calculated by dataflow infrastructure can be different
     from one calculated by regclass.  */
  if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
      || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
    return false;
  return allocnos_conflict_by_live_ranges_p (a1, a2);
}

#endif
/* This page contains code to coalesce memory stack slots used by
   spilled allocnos.  This results in smaller stack frame, better data
   locality, and in smaller code for some architectures like
   x86/x86_64 where insn size depends on address displacement value.
   On the other hand, it can worsen insn scheduling after the RA but
   in practice it is less important than smaller stack frames.  */
/* TRUE if we coalesced some allocnos.  In other words, if we got
   loops formed by members first_coalesced_allocno and
   next_coalesced_allocno containing more one allocno.  */
static bool allocno_coalesced_p;

/* Bitmap used to prevent a repeated allocno processing because of
   coalescing.  */
static bitmap processed_coalesced_allocno_bitmap;

/* See below.  */
typedef struct coalesce_data *coalesce_data_t;

/* To decrease footprint of ira_allocno structure we store all data
   needed only for coalescing in the following structure.  */
struct coalesce_data
{
  /* Coalesced allocnos form a cyclic list.  One allocno given by
     FIRST represents all coalesced allocnos.  The
     list is chained by NEXT.  */
  ira_allocno_t first;
  ira_allocno_t next;
  /* Temporary value used by the coalescing code.  */
  int temp;
};

/* Container for storing allocno data concerning coalescing.  */
static coalesce_data_t allocno_coalesce_data;

/* Macro to access the data concerning coalescing.  */
#define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging A2 set into A1
   set).  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_COALESCE_DATA (a1)->first;
  a = ALLOCNO_COALESCE_DATA (a2)->first;
  /* Already in the same coalesced set -- nothing to do.  */
  if (first == a)
    return;
  /* Retarget every member of A2's cycle at A1's representative,
     remembering the last member visited before A2 itself.  */
  for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      ALLOCNO_COALESCE_DATA (a)->first = first;
      if (a == a2)
	break;
      last = a;
    }
  /* Splice A2's cycle into A1's cycle right after FIRST.  */
  next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
  allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
  allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
}
/* Return TRUE if there are conflicting allocnos from two sets of
   coalesced allocnos given correspondingly by allocnos A1 and A2.  We
   use live ranges to find conflicts because conflicts are represented
   only for allocnos of the same allocno class and during the reload
   pass we coalesce allocnos for sharing stack memory slots.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  if (allocno_coalesced_p)
    {
      /* Mark all members of A1's coalesced set as processed.  */
      bitmap_clear (processed_coalesced_allocno_bitmap);
      for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
	  if (a == a1)
	    break;
	}
    }
  /* Check every pair (member of A2's set, member of A1's set) for a
     live-range conflict.  */
  for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
/* The major function for aggressive allocno coalescing.  We coalesce
   only spilled allocnos.  If some allocnos have been coalesced, we
   set up flag allocno_coalesced_p.  */
static void
coalesce_allocnos (void)
{
  ira_allocno_t a;
  ira_copy_t cp, next_cp;
  unsigned int j;
  int i, n, cp_num, regno;
  bitmap_iterator bi;

  cp_num = 0;
  /* Collect copies.  Only copies between spilled allocnos without
     equivalences are candidates.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      regno = ALLOCNO_REGNO (a);
      if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
	  || ira_equiv_no_lvalue_p (regno))
	continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
	{
	  if (cp->first == a)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      regno = ALLOCNO_REGNO (cp->second);
	      /* For priority coloring we coalesce allocnos only with
		 the same allocno class not with intersected allocno
		 classes as it were possible.  It is done for
		 simplicity.  */
	      if ((cp->insn != NULL || cp->constraint_p)
		  && ALLOCNO_ASSIGNED_P (cp->second)
		  && ALLOCNO_HARD_REGNO (cp->second) < 0
		  && ! ira_equiv_no_lvalue_p (regno))
		sorted_copies[cp_num++] = cp;
	    }
	  else if (cp->second == a)
	    next_cp = cp->next_second_allocno_copy;
	  else
	    gcc_unreachable ();
	}
    }
  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Coalesced copies, most frequently executed first.  */
  for (; cp_num != 0;)
    {
      /* Merge the most frequent conflict-free copy, then rescan.  */
      for (i = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (! coalesced_allocno_conflict_p (cp->first, cp->second))
	    {
	      allocno_coalesced_p = true;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		fprintf
		  (ira_dump_file,
		   " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
		   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
		   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
		   cp->freq);
	      merge_allocnos (cp->first, cp->second);
	      /* Skip the merged copy; the remainder is re-filtered
		 below.  */
	      i++;
	      break;
	    }
	}
      /* Collect the rest of copies.  */
      for (n = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  /* Drop copies whose endpoints now share a representative.  */
	  if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
	      != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
	    sorted_copies[n++] = cp;
	}
      cp_num = n;
    }
}
/* Usage cost and order number of coalesced allocno set to which
   given pseudo register belongs to.  */
static int *regno_coalesced_allocno_cost;
static int *regno_coalesced_allocno_num;

/* qsort comparison function: order pseudo registers by decreasing
   cost of the coalesced allocno set they belong to, then by the set's
   order number, then by regno so the result is fully determined.  */
static int
coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;

  int by_cost = (regno_coalesced_allocno_cost[regno2]
		 - regno_coalesced_allocno_cost[regno1]);
  if (by_cost != 0)
    return by_cost;
  int by_num = (regno_coalesced_allocno_num[regno1]
		- regno_coalesced_allocno_num[regno2]);
  if (by_num != 0)
    return by_num;
  return regno1 - regno2;
}
/* Widest width in which each pseudo reg is referred to (via subreg).
   It is used for sorting pseudo registers.  */
static unsigned int *regno_max_ref_width;

/* Sort pseudos according their slot numbers (putting ones with
   smaller numbers first, or last when the frame pointer is not
   needed).  */
static int
coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  ira_allocno_t a1 = ira_regno_allocno_map[regno1];
  ira_allocno_t a2 = ira_regno_allocno_map[regno2];
  int diff, slot_num1, slot_num2;
  int total_size1, total_size2;

  /* Pseudos without a slot (no allocno, or assigned a hard register)
     sort after those with one.  */
  if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
    {
      if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
	return regno1 - regno2;
      return 1;
    }
  else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
    return -1;
  /* Here slot numbers are stored as negated hard regnos.  */
  slot_num1 = -ALLOCNO_HARD_REGNO (a1);
  slot_num2 = -ALLOCNO_HARD_REGNO (a2);
  if ((diff = slot_num1 - slot_num2) != 0)
    return (frame_pointer_needed
	    || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
  /* Within a slot, bigger total size (including the widest subreg
     reference) goes first.  */
  total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
		     regno_max_ref_width[regno1]);
  total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
		     regno_max_ref_width[regno2]);
  if ((diff = total_size2 - total_size1) != 0)
    return diff;
  return regno1 - regno2;
}
3903 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
3904 for coalesced allocno sets containing allocnos with their regnos
3905 given in array PSEUDO_REGNOS of length N. */
3906 static void
3907 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
3909 int i, num, regno, cost;
3910 ira_allocno_t allocno, a;
3912 for (num = i = 0; i < n; i++)
3914 regno = pseudo_regnos[i];
3915 allocno = ira_regno_allocno_map[regno];
3916 if (allocno == NULL)
3918 regno_coalesced_allocno_cost[regno] = 0;
3919 regno_coalesced_allocno_num[regno] = ++num;
3920 continue;
3922 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3923 continue;
3924 num++;
3925 for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3926 a = ALLOCNO_COALESCE_DATA (a)->next)
3928 cost += ALLOCNO_FREQ (a);
3929 if (a == allocno)
3930 break;
3932 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3933 a = ALLOCNO_COALESCE_DATA (a)->next)
3935 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
3936 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
3937 if (a == allocno)
3938 break;
3943 /* Collect spilled allocnos representing coalesced allocno sets (the
3944 first coalesced allocno). The collected allocnos are returned
3945 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3946 number of the collected allocnos. The allocnos are given by their
3947 regnos in array PSEUDO_REGNOS of length N. */
3948 static int
3949 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
3950 ira_allocno_t *spilled_coalesced_allocnos)
3952 int i, num, regno;
3953 ira_allocno_t allocno;
3955 for (num = i = 0; i < n; i++)
3957 regno = pseudo_regnos[i];
3958 allocno = ira_regno_allocno_map[regno];
3959 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
3960 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3961 continue;
3962 spilled_coalesced_allocnos[num++] = allocno;
3964 return num;
3967 /* Array of live ranges of size IRA_ALLOCNOS_NUM. Live range for
3968 given slot contains live ranges of coalesced allocnos assigned to
3969 given slot. */
3970 static live_range_t *slot_coalesced_allocnos_live_ranges;
3972 /* Return TRUE if coalesced allocnos represented by ALLOCNO has live
3973 ranges intersected with live ranges of coalesced allocnos assigned
3974 to slot with number N. */
3975 static bool
3976 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
3978 ira_allocno_t a;
3980 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3981 a = ALLOCNO_COALESCE_DATA (a)->next)
3983 int i;
3984 int nr = ALLOCNO_NUM_OBJECTS (a);
3986 for (i = 0; i < nr; i++)
3988 ira_object_t obj = ALLOCNO_OBJECT (a, i);
3990 if (ira_live_ranges_intersect_p
3991 (slot_coalesced_allocnos_live_ranges[n],
3992 OBJECT_LIVE_RANGES (obj)))
3993 return true;
3995 if (a == allocno)
3996 break;
3998 return false;
4001 /* Update live ranges of slot to which coalesced allocnos represented
4002 by ALLOCNO were assigned. */
4003 static void
4004 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
4006 int i, n;
4007 ira_allocno_t a;
4008 live_range_t r;
4010 n = ALLOCNO_COALESCE_DATA (allocno)->temp;
4011 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4012 a = ALLOCNO_COALESCE_DATA (a)->next)
4014 int nr = ALLOCNO_NUM_OBJECTS (a);
4015 for (i = 0; i < nr; i++)
4017 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4019 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
4020 slot_coalesced_allocnos_live_ranges[n]
4021 = ira_merge_live_ranges
4022 (slot_coalesced_allocnos_live_ranges[n], r);
4024 if (a == allocno)
4025 break;
4029 /* We have coalesced allocnos involving in copies. Coalesce allocnos
4030 further in order to share the same memory stack slot. Allocnos
4031 representing sets of allocnos coalesced before the call are given
4032 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
4033 some allocnos were coalesced in the function. */
4034 static bool
4035 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
4037 int i, j, n, last_coalesced_allocno_num;
4038 ira_allocno_t allocno, a;
4039 bool merged_p = false;
4040 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
/* One accumulated live-range list per prospective slot, indexed by the
   slot numbers handed out through the ->temp field below.  */
4042 slot_coalesced_allocnos_live_ranges
4043 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
4044 memset (slot_coalesced_allocnos_live_ranges, 0,
4045 sizeof (live_range_t) * ira_allocnos_num);
4046 last_coalesced_allocno_num = 0;
4047 /* Coalesce non-conflicting spilled allocnos preferring most
4048 frequently used. */
4049 for (i = 0; i < num; i++)
4051 allocno = spilled_coalesced_allocnos[i];
/* Skip non-representatives, pseudos live across setjmp (their slots
   must stay stable), and pseudos with a known memory equivalence.  */
4052 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4053 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
4054 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4055 continue;
/* Search earlier (more frequent) sets for one whose slot's accumulated
   live ranges do not intersect ALLOCNO's set.  */
4056 for (j = 0; j < i; j++)
4058 a = spilled_coalesced_allocnos[j];
4059 n = ALLOCNO_COALESCE_DATA (a)->temp;
4060 if (ALLOCNO_COALESCE_DATA (a)->first == a
4061 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
4062 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
4063 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
4064 break;
4066 if (j >= i)
4068 /* No coalescing: set up number for coalesced allocnos
4069 represented by ALLOCNO. */
4070 ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
4071 setup_slot_coalesced_allocno_live_ranges (allocno);
4073 else
4075 allocno_coalesced_p = true;
4076 merged_p = true;
4077 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4078 fprintf (ira_dump_file,
4079 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4080 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
4081 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
/* Reuse A's slot number for ALLOCNO's set, fold ALLOCNO's live ranges
   into that slot, and merge the two coalesced sets.  */
4082 ALLOCNO_COALESCE_DATA (allocno)->temp
4083 = ALLOCNO_COALESCE_DATA (a)->temp;
4084 setup_slot_coalesced_allocno_live_ranges (allocno);
4085 merge_allocnos (a, allocno);
4086 ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
/* Release the live-range copies accumulated per slot.  */
4089 for (i = 0; i < ira_allocnos_num; i++)
4090 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
4091 ira_free (slot_coalesced_allocnos_live_ranges);
4092 return merged_p;
4095 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
4096 subsequent assigning stack slots to them in the reload pass. To do
4097 this we coalesce spilled allocnos first to decrease the number of
4098 memory-memory move insns. This function is called by the
4099 reload. */
4100 void
4101 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
4102 unsigned int *reg_max_ref_width)
4104 int max_regno = max_reg_num ();
4105 int i, regno, num, slot_num;
4106 ira_allocno_t allocno, a;
4107 ira_allocno_iterator ai;
4108 ira_allocno_t *spilled_coalesced_allocnos;
4110 ira_assert (! ira_use_lra_p);
4112 /* Set up allocnos can be coalesced. */
4113 coloring_allocno_bitmap = ira_allocate_bitmap ();
4114 for (i = 0; i < n; i++)
4116 regno = pseudo_regnos[i];
4117 allocno = ira_regno_allocno_map[regno];
4118 if (allocno != NULL)
4119 bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
4121 allocno_coalesced_p = false;
4122 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
4123 allocno_coalesce_data
4124 = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
4125 * ira_allocnos_num);
4126 /* Initialize coalesce data for allocnos. */
4127 FOR_EACH_ALLOCNO (a, ai)
4129 ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
/* Each allocno starts as a singleton circular list of which it is the
   representative.  */
4130 ALLOCNO_COALESCE_DATA (a)->first = a;
4131 ALLOCNO_COALESCE_DATA (a)->next = a;
4133 coalesce_allocnos ();
4134 ira_free_bitmap (coloring_allocno_bitmap);
4135 regno_coalesced_allocno_cost
4136 = (int *) ira_allocate (max_regno * sizeof (int));
4137 regno_coalesced_allocno_num
4138 = (int *) ira_allocate (max_regno * sizeof (int));
4139 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
4140 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4141 /* Sort regnos according frequencies of the corresponding coalesced
4142 allocno sets. */
4143 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
4144 spilled_coalesced_allocnos
4145 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
4146 * sizeof (ira_allocno_t));
4147 /* Collect allocnos representing the spilled coalesced allocno
4148 sets. */
4149 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4150 spilled_coalesced_allocnos);
/* If slot sharing merged more sets, the costs, order, and set of
   representatives all changed: recompute them.  */
4151 if (flag_ira_share_spill_slots
4152 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
4154 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4155 qsort (pseudo_regnos, n, sizeof (int),
4156 coalesced_pseudo_reg_freq_compare);
4157 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4158 spilled_coalesced_allocnos);
4160 ira_free_bitmap (processed_coalesced_allocno_bitmap);
4161 allocno_coalesced_p = false;
4162 /* Assign stack slot numbers to spilled allocno sets, use smaller
4163 numbers for most frequently used coalesced allocnos. -1 is
4164 reserved for dynamic search of stack slots for pseudos spilled by
4165 the reload. */
4166 slot_num = 1;
4167 for (i = 0; i < num; i++)
4169 allocno = spilled_coalesced_allocnos[i];
4170 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4171 || ALLOCNO_HARD_REGNO (allocno) >= 0
4172 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4173 continue;
4174 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4175 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
4176 slot_num++;
/* Encode the slot as a negative "hard register" on every member of
   the coalesced set.  */
4177 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4178 a = ALLOCNO_COALESCE_DATA (a)->next)
4180 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
4181 ALLOCNO_HARD_REGNO (a) = -slot_num;
4182 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4183 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
4184 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
4185 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
4186 reg_max_ref_width[ALLOCNO_REGNO (a)]));
4188 if (a == allocno)
4189 break;
4191 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4192 fprintf (ira_dump_file, "\n");
4194 ira_spilled_reg_stack_slots_num = slot_num - 1;
4195 ira_free (spilled_coalesced_allocnos);
4196 /* Sort regnos according the slot numbers. */
4197 regno_max_ref_width = reg_max_ref_width;
4198 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
/* Tear down the per-allocno coalesce data set up above.  */
4199 FOR_EACH_ALLOCNO (a, ai)
4200 ALLOCNO_ADD_DATA (a) = NULL;
4201 ira_free (allocno_coalesce_data);
4202 ira_free (regno_coalesced_allocno_num);
4203 ira_free (regno_coalesced_allocno_cost);
4208 /* This page contains code used by the reload pass to improve the
4209 final code. */
4211 /* The function is called from reload to mark changes in the
4212 allocation of REGNO made by the reload. Remember that reg_renumber
4213 reflects the change result. */
4214 void
4215 ira_mark_allocation_change (int regno)
4217 ira_allocno_t a = ira_regno_allocno_map[regno];
4218 int old_hard_regno, hard_regno, cost;
4219 enum reg_class aclass = ALLOCNO_CLASS (a);
4221 ira_assert (a != NULL);
4222 hard_regno = reg_renumber[regno];
4223 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
4224 return;
/* COST first accumulates the negated cost of the old location
   (memory, or the old hard register).  */
4225 if (old_hard_regno < 0)
4226 cost = -ALLOCNO_MEMORY_COST (a);
4227 else
4229 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
4230 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
4231 ? ALLOCNO_CLASS_COST (a)
4232 : ALLOCNO_HARD_REG_COSTS (a)
4233 [ira_class_hard_reg_index[aclass][old_hard_regno]]);
4234 update_costs_from_copies (a, false, false);
4236 ira_overall_cost -= cost;
4237 ALLOCNO_HARD_REGNO (a) = hard_regno;
/* Then add the cost of the new location (memory or the new hard
   register) before folding COST into the overall cost.  */
4238 if (hard_regno < 0)
4240 ALLOCNO_HARD_REGNO (a) = -1;
4241 cost += ALLOCNO_MEMORY_COST (a);
4243 else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
4245 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
4246 ? ALLOCNO_CLASS_COST (a)
4247 : ALLOCNO_HARD_REG_COSTS (a)
4248 [ira_class_hard_reg_index[aclass][hard_regno]]);
4249 update_costs_from_copies (a, true, false);
4251 else
4252 /* Reload changed class of the allocno. */
4253 cost = 0;
4254 ira_overall_cost += cost;
4257 /* This function is called when reload deletes memory-memory move. In
4258 this case we marks that the allocation of the corresponding
4259 allocnos should be not changed in future. Otherwise we risk to get
4260 a wrong code. */
4261 void
4262 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4264 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4265 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4267 ira_assert (dst != NULL && src != NULL
4268 && ALLOCNO_HARD_REGNO (dst) < 0
4269 && ALLOCNO_HARD_REGNO (src) < 0);
4270 ALLOCNO_DONT_REASSIGN_P (dst) = true;
4271 ALLOCNO_DONT_REASSIGN_P (src) = true;
4274 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
4275 allocno A and return TRUE in the case of success. */
4276 static bool
4277 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
4279 int hard_regno;
4280 enum reg_class aclass;
4281 int regno = ALLOCNO_REGNO (a);
4282 HARD_REG_SET saved[2];
4283 int i, n;
/* Temporarily widen each object's conflict set with FORBIDDEN_REGS
   (and, without -fcaller-saves, the call-clobbered registers for
   call-crossing allocnos); SAVED[] restores the sets at the end.  */
4285 n = ALLOCNO_NUM_OBJECTS (a);
4286 for (i = 0; i < n; i++)
4288 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4289 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
4290 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
4291 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
4292 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
4293 call_used_reg_set);
4295 ALLOCNO_ASSIGNED_P (a) = false;
4296 aclass = ALLOCNO_CLASS (a);
4297 update_curr_costs (a);
4298 assign_hard_reg (a, true);
4299 hard_regno = ALLOCNO_HARD_REGNO (a);
4300 reg_renumber[regno] = hard_regno;
4301 if (hard_regno < 0)
4302 ALLOCNO_HARD_REGNO (a) = -1;
4303 else
4305 ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
/* Credit the overall cost for moving the allocno from memory into the
   assigned hard register.  */
4306 ira_overall_cost
4307 -= (ALLOCNO_MEMORY_COST (a)
4308 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
4309 ? ALLOCNO_CLASS_COST (a)
4310 : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
4311 [aclass][hard_regno]]));
4312 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
4313 && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
4314 call_used_reg_set))
4316 ira_assert (flag_caller_saves);
4317 caller_save_needed = 1;
4321 /* If we found a hard register, modify the RTL for the pseudo
4322 register to show the hard register, and mark the pseudo register
4323 live. */
4324 if (reg_renumber[regno] >= 0)
4326 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4327 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
4328 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
4329 mark_home_live (regno);
4331 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4332 fprintf (ira_dump_file, "\n");
/* Restore the conflict sets widened above.  */
4333 for (i = 0; i < n; i++)
4335 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4336 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
4338 return reg_renumber[regno] >= 0;
/* Sort pseudos according their usage frequencies (putting most
   frequently ones first).  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int freq_cmp = REG_FREQ (regno2) - REG_FREQ (regno1);

  /* Higher frequency first; fall back on regno for a stable order.  */
  return freq_cmp != 0 ? freq_cmp : regno1 - regno2;
}
4355 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
4356 NUM of them) or spilled pseudos conflicting with pseudos in
4357 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
4358 allocation has been changed. The function doesn't use
4359 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
4360 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
4361 is called by the reload pass at the end of each reload
4362 iteration. */
4363 bool
4364 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
4365 HARD_REG_SET bad_spill_regs,
4366 HARD_REG_SET *pseudo_forbidden_regs,
4367 HARD_REG_SET *pseudo_previous_regs,
4368 bitmap spilled)
4370 int i, n, regno;
4371 bool changed_p;
4372 ira_allocno_t a;
4373 HARD_REG_SET forbidden_regs;
4374 bitmap temp = BITMAP_ALLOC (NULL);
4376 /* Add pseudos which conflict with pseudos already in
4377 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
4378 to allocating in two steps as some of the conflicts might have
4379 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
4380 for (i = 0; i < num; i++)
4381 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
/* NUM grows inside this loop as conflicting pseudos are appended; N
   bounds the scan to the originally passed entries only.  */
4383 for (i = 0, n = num; i < n; i++)
4385 int nr, j;
4386 int regno = spilled_pseudo_regs[i];
4387 bitmap_set_bit (temp, regno);
4389 a = ira_regno_allocno_map[regno];
4390 nr = ALLOCNO_NUM_OBJECTS (a);
4391 for (j = 0; j < nr; j++)
4393 ira_object_t conflict_obj;
4394 ira_object_t obj = ALLOCNO_OBJECT (a, j);
4395 ira_object_conflict_iterator oci;
4397 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
4399 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
/* bitmap_set_bit returns true only for a newly set bit, so every
   conflicting pseudo is appended at most once.  */
4400 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
4401 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
4402 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
4404 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
4405 /* ?!? This seems wrong. */
4406 bitmap_set_bit (consideration_allocno_bitmap,
4407 ALLOCNO_NUM (conflict_a));
4413 if (num > 1)
4414 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
4415 changed_p = false;
4416 /* Try to assign hard registers to pseudos from
4417 SPILLED_PSEUDO_REGS. */
4418 for (i = 0; i < num; i++)
4420 regno = spilled_pseudo_regs[i];
4421 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
4422 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
4423 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
4424 gcc_assert (reg_renumber[regno] < 0);
4425 a = ira_regno_allocno_map[regno];
4426 ira_mark_allocation_change (regno);
4427 ira_assert (reg_renumber[regno] < 0);
4428 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4429 fprintf (ira_dump_file,
4430 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
4431 ALLOCNO_MEMORY_COST (a)
4432 - ALLOCNO_CLASS_COST (a));
4433 allocno_reload_assign (a, forbidden_regs);
4434 if (reg_renumber[regno] >= 0)
4436 CLEAR_REGNO_REG_SET (spilled, regno);
4437 changed_p = true;
4440 BITMAP_FREE (temp);
4441 return changed_p;
4444 /* The function is called by reload and returns already allocated
4445 stack slot (if any) for REGNO with given INHERENT_SIZE and
4446 TOTAL_SIZE. In the case of failure to find a slot which can be
4447 used for REGNO, the function returns NULL. */
4449 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
4450 unsigned int total_size)
4452 unsigned int i;
4453 int slot_num, best_slot_num;
4454 int cost, best_cost;
4455 ira_copy_t cp, next_cp;
4456 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
4457 rtx x;
4458 bitmap_iterator bi;
4459 struct ira_spilled_reg_stack_slot *slot = NULL;
4461 ira_assert (! ira_use_lra_p);
4463 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
4464 && inherent_size <= total_size
4465 && ALLOCNO_HARD_REGNO (allocno) < 0);
4466 if (! flag_ira_share_spill_slots)
4467 return NULL_RTX;
/* ALLOCNO_HARD_REGNO encodes an assigned slot as -slot_num - 2, so a
   decoded value of -1 means IRA assigned no slot to this allocno.  */
4468 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4469 if (slot_num != -1)
4471 slot = &ira_spilled_reg_stack_slots[slot_num];
4472 x = slot->mem;
4474 else
4476 best_cost = best_slot_num = -1;
4477 x = NULL_RTX;
4478 /* It means that the pseudo was spilled in the reload pass, try
4479 to reuse a slot. */
4480 for (slot_num = 0;
4481 slot_num < ira_spilled_reg_stack_slots_num;
4482 slot_num++)
4484 slot = &ira_spilled_reg_stack_slots[slot_num];
4485 if (slot->mem == NULL_RTX)
4486 continue;
4487 if (slot->width < total_size
4488 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
4489 continue;
/* A slot is usable only if no pseudo already in it overlaps REGNO's
   live ranges; otherwise skip to the next slot.  */
4491 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4492 FIRST_PSEUDO_REGISTER, i, bi)
4494 another_allocno = ira_regno_allocno_map[i];
4495 if (allocnos_conflict_by_live_ranges_p (allocno,
4496 another_allocno))
4497 goto cont;
/* Score the slot by the frequency of copies connecting REGNO with
   pseudos already sharing the slot — reusing such a slot can turn
   copies into no-ops.  */
4499 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
4500 cp != NULL;
4501 cp = next_cp)
4503 if (cp->first == allocno)
4505 next_cp = cp->next_first_allocno_copy;
4506 another_allocno = cp->second;
4508 else if (cp->second == allocno)
4510 next_cp = cp->next_second_allocno_copy;
4511 another_allocno = cp->first;
4513 else
4514 gcc_unreachable ();
4515 if (cp->insn == NULL_RTX)
4516 continue;
4517 if (bitmap_bit_p (&slot->spilled_regs,
4518 ALLOCNO_REGNO (another_allocno)))
4519 cost += cp->freq;
4521 if (cost > best_cost)
4523 best_cost = cost;
4524 best_slot_num = slot_num;
4526 cont:
4529 if (best_cost >= 0)
4531 slot_num = best_slot_num;
4532 slot = &ira_spilled_reg_stack_slots[slot_num];
4533 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4534 x = slot->mem;
4535 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4538 if (x != NULL_RTX)
4540 ira_assert (slot->width >= total_size);
4541 #ifdef ENABLE_IRA_CHECKING
4542 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4543 FIRST_PSEUDO_REGISTER, i, bi)
4545 ira_assert (! conflict_by_live_ranges_p (regno, i));
4547 #endif
4548 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4549 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4551 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
4552 regno, REG_FREQ (regno), slot_num);
4553 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4554 FIRST_PSEUDO_REGISTER, i, bi)
4556 if ((unsigned) regno != i)
4557 fprintf (ira_dump_file, " %d", i);
4559 fprintf (ira_dump_file, "\n");
4562 return x;
4565 /* This is called by reload every time a new stack slot X with
4566 TOTAL_SIZE was allocated for REGNO. We store this info for
4567 subsequent ira_reuse_stack_slot calls. */
4568 void
4569 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
4571 struct ira_spilled_reg_stack_slot *slot;
4572 int slot_num;
4573 ira_allocno_t allocno;
4575 ira_assert (! ira_use_lra_p);
4577 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
4578 allocno = ira_regno_allocno_map[regno];
4579 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4580 if (slot_num == -1)
4582 slot_num = ira_spilled_reg_stack_slots_num++;
4583 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4585 slot = &ira_spilled_reg_stack_slots[slot_num];
4586 INIT_REG_SET (&slot->spilled_regs);
4587 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4588 slot->mem = x;
4589 slot->width = total_size;
4590 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4591 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
4592 regno, REG_FREQ (regno), slot_num);
4596 /* Return spill cost for pseudo-registers whose numbers are in array
4597 REGNOS (with a negative number as an end marker) for reload with
4598 given IN and OUT for INSN. Return also number points (through
4599 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4600 the register pressure is high, number of references of the
4601 pseudo-registers (through NREFS), number of callee-clobbered
4602 hard-registers occupied by the pseudo-registers (through
4603 CALL_USED_COUNT), and the first hard regno occupied by the
4604 pseudo-registers (through FIRST_HARD_REGNO). */
4605 static int
4606 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx_insn *insn,
4607 int *excess_pressure_live_length,
4608 int *nrefs, int *call_used_count, int *first_hard_regno)
4610 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
4611 bool in_p, out_p;
4612 int length;
4613 ira_allocno_t a;
4615 *nrefs = 0;
4616 for (length = count = cost = i = 0;; i++)
4618 regno = regnos[i];
/* REGNOS is terminated by a negative entry.  */
4619 if (regno < 0)
4620 break;
4621 *nrefs += REG_N_REFS (regno);
4622 hard_regno = reg_renumber[regno];
4623 ira_assert (hard_regno >= 0);
4624 a = ira_regno_allocno_map[regno];
4625 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
4626 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
/* Count the pseudo toward CALL_USED_COUNT only if every hard register
   it occupies is call-clobbered.  */
4627 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
4628 for (j = 0; j < nregs; j++)
4629 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
4630 break;
4631 if (j == nregs)
4632 count++;
4633 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
4634 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
/* If the reload value lives in this hard register and dies in INSN,
   spilling here saves the corresponding memory moves; credit them
   weighted by the block frequency.  */
4635 if ((in_p || out_p)
4636 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
4638 saved_cost = 0;
4639 if (in_p)
4640 saved_cost += ira_memory_move_cost
4641 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
4642 if (out_p)
4643 saved_cost
4644 += ira_memory_move_cost
4645 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
4646 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
4649 *excess_pressure_live_length = length;
4650 *call_used_count = count;
4651 hard_regno = -1;
4652 if (regnos[0] >= 0)
4654 hard_regno = reg_renumber[regnos[0]];
4656 *first_hard_regno = hard_regno;
4657 return cost;
4660 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4661 REGNOS is better than spilling pseudo-registers with numbers in
4662 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4663 function used by the reload pass to make better register spilling
4664 decisions. */
4665 bool
4666 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
4667 rtx in, rtx out, rtx_insn *insn)
4669 int cost, other_cost;
4670 int length, other_length;
4671 int nrefs, other_nrefs;
4672 int call_used_count, other_call_used_count;
4673 int hard_regno, other_hard_regno;
4675 cost = calculate_spill_cost (regnos, in, out, insn,
4676 &length, &nrefs, &call_used_count, &hard_regno);
4677 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
4678 &other_length, &other_nrefs,
4679 &other_call_used_count,
4680 &other_hard_regno);
/* Tie-break order: unreferenced pseudos first, then lower spill cost,
   then longer high-pressure live length, then a target-specific
   criterion (allocation order or call-clobbered count).  */
4681 if (nrefs == 0 && other_nrefs != 0)
4682 return true;
4683 if (nrefs != 0 && other_nrefs == 0)
4684 return false;
4685 if (cost != other_cost)
4686 return cost < other_cost;
4687 if (length != other_length)
4688 return length > other_length;
4689 #ifdef REG_ALLOC_ORDER
4690 if (hard_regno >= 0 && other_hard_regno >= 0)
4691 return (inv_reg_alloc_order[hard_regno]
4692 < inv_reg_alloc_order[other_hard_regno]);
4693 #else
4694 if (call_used_count != other_call_used_count)
4695 return call_used_count > other_call_used_count;
4696 #endif
4697 return false;
4702 /* Allocate and initialize data necessary for assign_hard_reg. */
4703 void
4704 ira_initiate_assign (void)
4706 sorted_allocnos
4707 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4708 * ira_allocnos_num);
4709 consideration_allocno_bitmap = ira_allocate_bitmap ();
4710 initiate_cost_update ();
4711 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4712 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
4713 * sizeof (ira_copy_t));
4716 /* Deallocate data used by assign_hard_reg. */
4717 void
4718 ira_finish_assign (void)
4720 ira_free (sorted_allocnos);
4721 ira_free_bitmap (consideration_allocno_bitmap);
4722 finish_cost_update ();
4723 ira_free (allocno_priorities);
4724 ira_free (sorted_copies);
4729 /* Entry function doing color-based register allocation. */
4730 static void
4731 color (void)
4733 allocno_stack_vec.create (ira_allocnos_num);
4734 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
4735 ira_initiate_assign ();
4736 do_coloring ();
4737 ira_finish_assign ();
4738 allocno_stack_vec.release ();
4739 move_spill_restore ();
4744 /* This page contains a simple register allocator without usage of
4745 allocno conflicts. This is used for fast allocation for -O0. */
4747 /* Do register allocation by not using allocno conflicts. It uses
4748 only allocno live ranges. The algorithm is close to Chow's
4749 priority coloring. */
4750 static void
4751 fast_allocation (void)
4753 int i, j, k, num, class_size, hard_regno;
4754 #ifdef STACK_REGS
4755 bool no_stack_reg_p;
4756 #endif
4757 enum reg_class aclass;
4758 machine_mode mode;
4759 ira_allocno_t a;
4760 ira_allocno_iterator ai;
4761 live_range_t r;
4762 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
4764 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4765 * ira_allocnos_num);
4766 num = 0;
4767 FOR_EACH_ALLOCNO (a, ai)
4768 sorted_allocnos[num++] = a;
4769 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4770 setup_allocno_priorities (sorted_allocnos, num);
/* USED_HARD_REGS records, for each program point, the hard registers
   taken by already-allocated allocnos live at that point.  */
4771 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
4772 * ira_max_point);
4773 for (i = 0; i < ira_max_point; i++)
4774 CLEAR_HARD_REG_SET (used_hard_regs[i]);
/* Process allocnos in priority order.  */
4775 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
4776 allocno_priority_compare_func);
4777 for (i = 0; i < num; i++)
4779 int nr, l;
4781 a = sorted_allocnos[i];
4782 nr = ALLOCNO_NUM_OBJECTS (a);
/* Gather the hard registers unavailable over the allocno's entire
   live range.  */
4783 CLEAR_HARD_REG_SET (conflict_hard_regs);
4784 for (l = 0; l < nr; l++)
4786 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4787 IOR_HARD_REG_SET (conflict_hard_regs,
4788 OBJECT_CONFLICT_HARD_REGS (obj));
4789 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4790 for (j = r->start; j <= r->finish; j++)
4791 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
4793 aclass = ALLOCNO_CLASS (a);
4794 ALLOCNO_ASSIGNED_P (a) = true;
4795 ALLOCNO_HARD_REGNO (a) = -1;
4796 if (hard_reg_set_subset_p (reg_class_contents[aclass],
4797 conflict_hard_regs))
4798 continue;
4799 mode = ALLOCNO_MODE (a);
4800 #ifdef STACK_REGS
4801 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
4802 #endif
/* Pick the first usable hard register of the class, then mark it busy
   at every point the allocno is live.  */
4803 class_size = ira_class_hard_regs_num[aclass];
4804 for (j = 0; j < class_size; j++)
4806 hard_regno = ira_class_hard_regs[aclass][j];
4807 #ifdef STACK_REGS
4808 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
4809 && hard_regno <= LAST_STACK_REG)
4810 continue;
4811 #endif
4812 if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
4813 || (TEST_HARD_REG_BIT
4814 (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
4815 continue;
4816 ALLOCNO_HARD_REGNO (a) = hard_regno;
4817 for (l = 0; l < nr; l++)
4819 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4820 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4821 for (k = r->start; k <= r->finish; k++)
4822 IOR_HARD_REG_SET (used_hard_regs[k],
4823 ira_reg_mode_hard_regset[hard_regno][mode]);
4825 break;
4828 ira_free (sorted_allocnos);
4829 ira_free (used_hard_regs);
4830 ira_free (allocno_priorities);
4831 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
4832 ira_print_disposition (ira_dump_file);
4837 /* Entry function doing coloring. */
4838 void
4839 ira_color (void)
4841 ira_allocno_t a;
4842 ira_allocno_iterator ai;
4844 /* Setup updated costs. */
4845 FOR_EACH_ALLOCNO (a, ai)
4847 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
4848 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
4850 if (ira_conflicts_p)
4851 color ();
4852 else
4853 fast_allocation ();