cp/
[official-gcc.git] / gcc / ira-color.c
blob6c535072793bb8c321390e0713c1704af9d5a91a
1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2015 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "target.h"
28 #include "regs.h"
29 #include "flags.h"
30 #include "sbitmap.h"
31 #include "bitmap.h"
32 #include "hard-reg-set.h"
33 #include "predict.h"
34 #include "function.h"
35 #include "dominance.h"
36 #include "cfg.h"
37 #include "basic-block.h"
38 #include "symtab.h"
39 #include "alias.h"
40 #include "tree.h"
41 #include "insn-config.h"
42 #include "expmed.h"
43 #include "dojump.h"
44 #include "explow.h"
45 #include "calls.h"
46 #include "emit-rtl.h"
47 #include "varasm.h"
48 #include "stmt.h"
49 #include "expr.h"
50 #include "diagnostic-core.h"
51 #include "reload.h"
52 #include "params.h"
53 #include "df.h"
54 #include "ira-int.h"
/* Pointer to a unique set of assignable hard registers plus its
   accumulated spill cost; see struct allocno_hard_regs below.  */
typedef struct allocno_hard_regs *allocno_hard_regs_t;

/* The structure contains information about hard registers which can be
   assigned to allocnos.  Usually it is the allocno profitable hard
   registers, but in some cases this set can be a bit different.  The
   major reason for the difference is a requirement to use hard register
   sets that form a tree or a forest (set of trees), i.e. the hard
   register set of a node should contain the hard register sets of its
   subnodes.  */
struct allocno_hard_regs
{
  /* Hard registers which can be assigned to an allocno.  */
  HARD_REG_SET set;
  /* Overall (spilling) cost of all allocnos with the given register
     set.  64-bit because frequencies summed over many allocnos can
     overflow 32 bits.  */
  int64_t cost;
};
/* Pointer to a node of the hard-register forest; see below.  */
typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;

/* A node representing allocno hard registers.  Such nodes form a
   forest (set of trees).  Each subnode of a given node in the forest
   refers to a hard register set (usually an allocno profitable hard
   register set) which is a subset of the one referred to from the
   given node.  */
struct allocno_hard_regs_node
{
  /* Set up number of the node in preorder traversing of the forest.  */
  int preorder_num;
  /* Used for different calculation like finding conflict size of an
     allocno.  Compared against node_check_tick to avoid clearing the
     whole forest between uses.  */
  int check;
  /* Used for calculation of conflict size of an allocno.  The
     conflict size of the allocno is maximal number of given allocno
     hard registers needed for allocation of the conflicting allocnos.
     Given allocno is trivially colored if this number plus the number
     of hard registers needed for given allocno is not greater than
     the number of given allocno hard register set.  */
  int conflict_size;
  /* The number of hard registers given by member hard_regs.  */
  int hard_regs_num;
  /* The following member is used to form the final forest: nodes not
     marked used are spliced out (see
     remove_unused_allocno_hard_regs_nodes).  */
  bool used_p;
  /* Pointer to the corresponding profitable hard registers.  */
  allocno_hard_regs_t hard_regs;
  /* Parent, first subnode, previous and next node with the same
     parent in the forest.  */
  allocno_hard_regs_node_t parent, first, prev, next;
};
/* Info about changing hard reg costs of an allocno.  Kept so the
   change can be undone if the allocno fails to get the expected hard
   register.  */
struct update_cost_record
{
  /* Hard regno for which we changed the cost.  */
  int hard_regno;
  /* Divisor used when we changed the cost of HARD_REGNO.  */
  int divisor;
  /* Next record for given allocno (singly-linked list).  */
  struct update_cost_record *next;

  /* Pool allocation new operator.  */
  inline void *operator new (size_t)
  {
    return pool.allocate ();
  }
  /* Delete operator utilizing pool allocation.  */
  inline void operator delete (void *ptr)
  {
    pool.remove ((update_cost_record *) ptr);
  }
  /* Memory allocation pool shared by all records.  */
  static pool_allocator<update_cost_record> pool;
};
/* To decrease footprint of ira_allocno structure we store all data
   needed only for coloring in the following structure.  It is attached
   to an allocno through ALLOCNO_ADD_DATA (see ALLOCNO_COLOR_DATA
   below).  */
struct allocno_color_data
{
  /* TRUE value means that the allocno was not removed yet from the
     conflicting graph during coloring.  */
  unsigned int in_graph_p : 1;
  /* TRUE if it is put on the stack to make other allocnos
     colorable.  */
  unsigned int may_be_spilled_p : 1;
  /* TRUE if the allocno is trivially colorable.  */
  unsigned int colorable_p : 1;
  /* Number of hard registers of the allocno class really
     available for the allocno allocation.  It is number of the
     profitable hard regs.  */
  int available_regs_num;
  /* Allocnos in a bucket (used in coloring) chained by the following
     two members.  */
  ira_allocno_t next_bucket_allocno;
  ira_allocno_t prev_bucket_allocno;
  /* Used for temporary purposes.  */
  int temp;
  /* Used to exclude repeated processing (compared against
     curr_allocno_process).  */
  int last_process;
  /* Profitable hard regs available for this pseudo allocation.  It
     means that the set excludes unavailable hard regs and hard regs
     conflicting with given pseudo.  They should be of the allocno
     class.  */
  HARD_REG_SET profitable_hard_regs;
  /* The allocno hard registers node.  */
  allocno_hard_regs_node_t hard_regs_node;
  /* Array of structures allocno_hard_regs_subnode representing
     given allocno hard registers node (the 1st element in the array)
     and all its subnodes in the tree (forest) of allocno hard
     register nodes (see comments above).  */
  int hard_regs_subnodes_start;
  /* The length of the previous array.  */
  int hard_regs_subnodes_num;
  /* Records about updating allocno hard reg costs from copies.  If
     the allocno did not get expected hard register, these records are
     used to restore original hard reg costs of allocnos connected to
     this allocno by copies.  */
  struct update_cost_record *update_cost_records;
  /* Threads.  We collect allocnos connected by copies into threads
     and try to assign hard regs to allocnos by threads.  */
  /* Allocno representing all thread.  */
  ira_allocno_t first_thread_allocno;
  /* Allocnos in thread forms a cycle list through the following
     member.  */
  ira_allocno_t next_thread_allocno;
  /* All thread frequency.  Defined only for first thread allocno.  */
  int thread_freq;
};

/* See above.  */
typedef struct allocno_color_data *allocno_color_data_t;

/* Container for storing allocno data concerning coloring.  */
static allocno_color_data_t allocno_color_data;

/* Macro to access the data concerning coloring.  */
#define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))

/* Used for finding allocno colorability to exclude repeated allocno
   processing and for updating preferencing to exclude repeated
   allocno processing during assignment.  */
static int curr_allocno_process;
/* This file contains code for regional graph coloring, spill/restore
   code placement optimization, and code helping the reload pass to do
   a better job.  */

/* Bitmap of allocnos which should be colored.  */
static bitmap coloring_allocno_bitmap;

/* Bitmap of allocnos which should be taken into account during
   coloring.  In general case it contains allocnos from
   coloring_allocno_bitmap plus other already colored conflicting
   allocnos.  */
static bitmap consideration_allocno_bitmap;

/* All allocnos sorted according their priorities.  */
static ira_allocno_t *sorted_allocnos;

/* Vec representing the stack of allocnos used during coloring.  */
static vec<ira_allocno_t> allocno_stack_vec;

/* Helper for qsort comparison callbacks - return a positive integer if
   X > Y, or a negative value otherwise.  Use a conditional expression
   instead of a difference computation to insulate from possible overflow
   issues, e.g. X - Y < 0 for some X > 0 and Y < 0.  */
#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)

/* Definition of vector of allocno hard registers.  */

/* Vector of unique allocno hard registers.  */
static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
/* Hash-table traits for allocno_hard_regs: hash/compare by the
   HARD_REG_SET only (cost is ignored), no removal action.  */
struct allocno_hard_regs_hasher : typed_noop_remove <allocno_hard_regs>
{
  typedef allocno_hard_regs *value_type;
  typedef allocno_hard_regs *compare_type;
  static inline hashval_t hash (const allocno_hard_regs *);
  static inline bool equal (const allocno_hard_regs *,
			    const allocno_hard_regs *);
};
240 /* Returns hash value for allocno hard registers V. */
241 inline hashval_t
242 allocno_hard_regs_hasher::hash (const allocno_hard_regs *hv)
244 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
247 /* Compares allocno hard registers V1 and V2. */
248 inline bool
249 allocno_hard_regs_hasher::equal (const allocno_hard_regs *hv1,
250 const allocno_hard_regs *hv2)
252 return hard_reg_set_equal_p (hv1->set, hv2->set);
/* Hash table of unique allocno hard registers, keyed by register set
   (see allocno_hard_regs_hasher).  Owned by init/finish_allocno_hard_regs.  */
static hash_table<allocno_hard_regs_hasher> *allocno_hard_regs_htab;
258 /* Return allocno hard registers in the hash table equal to HV. */
259 static allocno_hard_regs_t
260 find_hard_regs (allocno_hard_regs_t hv)
262 return allocno_hard_regs_htab->find (hv);
265 /* Insert allocno hard registers HV in the hash table (if it is not
266 there yet) and return the value which in the table. */
267 static allocno_hard_regs_t
268 insert_hard_regs (allocno_hard_regs_t hv)
270 allocno_hard_regs **slot = allocno_hard_regs_htab->find_slot (hv, INSERT);
272 if (*slot == NULL)
273 *slot = hv;
274 return *slot;
277 /* Initialize data concerning allocno hard registers. */
278 static void
279 init_allocno_hard_regs (void)
281 allocno_hard_regs_vec.create (200);
282 allocno_hard_regs_htab
283 = new hash_table<allocno_hard_regs_hasher> (200);
286 /* Add (or update info about) allocno hard registers with SET and
287 COST. */
288 static allocno_hard_regs_t
289 add_allocno_hard_regs (HARD_REG_SET set, int64_t cost)
291 struct allocno_hard_regs temp;
292 allocno_hard_regs_t hv;
294 gcc_assert (! hard_reg_set_empty_p (set));
295 COPY_HARD_REG_SET (temp.set, set);
296 if ((hv = find_hard_regs (&temp)) != NULL)
297 hv->cost += cost;
298 else
300 hv = ((struct allocno_hard_regs *)
301 ira_allocate (sizeof (struct allocno_hard_regs)));
302 COPY_HARD_REG_SET (hv->set, set);
303 hv->cost = cost;
304 allocno_hard_regs_vec.safe_push (hv);
305 insert_hard_regs (hv);
307 return hv;
310 /* Finalize data concerning allocno hard registers. */
311 static void
312 finish_allocno_hard_regs (void)
314 int i;
315 allocno_hard_regs_t hv;
317 for (i = 0;
318 allocno_hard_regs_vec.iterate (i, &hv);
319 i++)
320 ira_free (hv);
321 delete allocno_hard_regs_htab;
322 allocno_hard_regs_htab = NULL;
323 allocno_hard_regs_vec.release ();
326 /* Sort hard regs according to their frequency of usage. */
327 static int
328 allocno_hard_regs_compare (const void *v1p, const void *v2p)
330 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
331 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
333 if (hv2->cost > hv1->cost)
334 return 1;
335 else if (hv2->cost < hv1->cost)
336 return -1;
337 else
338 return 0;
/* Used for finding a common ancestor of two allocno hard registers
   nodes in the forest.  We use the current value of
   'node_check_tick' to mark all nodes from one node to the top and
   then walking up from another node until we find a marked node.

   It is also used to figure out allocno colorability as a mark that
   we already reset value of member 'conflict_size' for the forest
   node corresponding to the processed allocno.  */
static int node_check_tick;

/* Roots of the forest containing hard register sets which can be
   assigned to allocnos.  */
static allocno_hard_regs_node_t hard_regs_roots;

/* Definition of vector of allocno hard register nodes.  */

/* Vector used as scratch space while creating the forest.  */
static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
362 /* Create and return allocno hard registers node containing allocno
363 hard registers HV. */
364 static allocno_hard_regs_node_t
365 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
367 allocno_hard_regs_node_t new_node;
369 new_node = ((struct allocno_hard_regs_node *)
370 ira_allocate (sizeof (struct allocno_hard_regs_node)));
371 new_node->check = 0;
372 new_node->hard_regs = hv;
373 new_node->hard_regs_num = hard_reg_set_size (hv->set);
374 new_node->first = NULL;
375 new_node->used_p = false;
376 return new_node;
379 /* Add allocno hard registers node NEW_NODE to the forest on its level
380 given by ROOTS. */
381 static void
382 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
383 allocno_hard_regs_node_t new_node)
385 new_node->next = *roots;
386 if (new_node->next != NULL)
387 new_node->next->prev = new_node;
388 new_node->prev = NULL;
389 *roots = new_node;
/* Add allocno hard registers HV (or its best approximation if it is
   not possible) to the forest on its level given by ROOTS.

   Recursive invariant: at each level, HV is either already present
   (equal set), strictly contained in one node (recurse into it),
   a superset of some nodes (collect them in hard_regs_node_vec and
   re-parent them under a new node for HV's set), or partially
   overlapping (recurse with the intersection).  */
static void
add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
				 allocno_hard_regs_t hv)
{
  unsigned int i, start;
  allocno_hard_regs_node_t node, prev, new_node;
  HARD_REG_SET temp_set;
  allocno_hard_regs_t hv2;

  /* Remember where this recursion level's entries in the shared
     scratch vector begin; everything past START belongs to us.  */
  start = hard_regs_node_vec.length ();
  for (node = *roots; node != NULL; node = node->next)
    {
      if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
	return;
      if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
	{
	  add_allocno_hard_regs_to_forest (&node->first, hv);
	  return;
	}
      if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
	/* NODE's set is contained in HV's: candidate child of the
	   new node created below.  */
	hard_regs_node_vec.safe_push (node);
      else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
	{
	  /* Partial overlap: descend with the intersection so the
	     forest property (children are subsets of parents) is
	     preserved.  */
	  COPY_HARD_REG_SET (temp_set, hv->set);
	  AND_HARD_REG_SET (temp_set, node->hard_regs->set);
	  hv2 = add_allocno_hard_regs (temp_set, hv->cost);
	  add_allocno_hard_regs_to_forest (&node->first, hv2);
	}
    }
  if (hard_regs_node_vec.length ()
      > start + 1)
    {
      /* Create a new node which contains nodes in hard_regs_node_vec.  */
      CLEAR_HARD_REG_SET (temp_set);
      for (i = start;
	   i < hard_regs_node_vec.length ();
	   i++)
	{
	  node = hard_regs_node_vec[i];
	  IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
	}
      hv = add_allocno_hard_regs (temp_set, hv->cost);
      new_node = create_new_allocno_hard_regs_node (hv);
      prev = NULL;
      /* Unlink each collected node from the current level and chain
	 it as a child of NEW_NODE.  */
      for (i = start;
	   i < hard_regs_node_vec.length ();
	   i++)
	{
	  node = hard_regs_node_vec[i];
	  if (node->prev == NULL)
	    *roots = node->next;
	  else
	    node->prev->next = node->next;
	  if (node->next != NULL)
	    node->next->prev = node->prev;
	  if (prev == NULL)
	    new_node->first = node;
	  else
	    prev->next = node;
	  node->prev = prev;
	  node->next = NULL;
	  prev = node;
	}
      add_new_allocno_hard_regs_node_to_forest (roots, new_node);
    }
  /* Drop this level's scratch entries before returning to the
     caller's recursion level.  */
  hard_regs_node_vec.truncate (start);
}
462 /* Add allocno hard registers nodes starting with the forest level
463 given by FIRST which contains biggest set inside SET. */
464 static void
465 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
466 HARD_REG_SET set)
468 allocno_hard_regs_node_t node;
470 ira_assert (first != NULL);
471 for (node = first; node != NULL; node = node->next)
472 if (hard_reg_set_subset_p (node->hard_regs->set, set))
473 hard_regs_node_vec.safe_push (node);
474 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
475 collect_allocno_hard_regs_cover (node->first, set);
478 /* Set up field parent as PARENT in all allocno hard registers nodes
479 in forest given by FIRST. */
480 static void
481 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
482 allocno_hard_regs_node_t parent)
484 allocno_hard_regs_node_t node;
486 for (node = first; node != NULL; node = node->next)
488 node->parent = parent;
489 setup_allocno_hard_regs_nodes_parent (node->first, node);
493 /* Return allocno hard registers node which is a first common ancestor
494 node of FIRST and SECOND in the forest. */
495 static allocno_hard_regs_node_t
496 first_common_ancestor_node (allocno_hard_regs_node_t first,
497 allocno_hard_regs_node_t second)
499 allocno_hard_regs_node_t node;
501 node_check_tick++;
502 for (node = first; node != NULL; node = node->parent)
503 node->check = node_check_tick;
504 for (node = second; node != NULL; node = node->parent)
505 if (node->check == node_check_tick)
506 return node;
507 return first_common_ancestor_node (second, first);
510 /* Print hard reg set SET to F. */
511 static void
512 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
514 int i, start;
516 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
518 if (TEST_HARD_REG_BIT (set, i))
520 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
521 start = i;
523 if (start >= 0
524 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
526 if (start == i - 1)
527 fprintf (f, " %d", start);
528 else if (start == i - 2)
529 fprintf (f, " %d %d", start, start + 1);
530 else
531 fprintf (f, " %d-%d", start, i - 1);
532 start = -1;
535 if (new_line_p)
536 fprintf (f, "\n");
/* Print allocno hard register subforest given by ROOTS and its LEVEL
   to F.  Each node is printed as "preorder_num:(regs)@cost", indented
   two columns per tree level, children recursively below.  */
static void
print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
			   int level)
{
  int i;
  allocno_hard_regs_node_t node;

  for (node = roots; node != NULL; node = node->next)
    {
      fprintf (f, " ");
      /* Indentation proportional to the depth in the forest.  */
      for (i = 0; i < level * 2; i++)
	fprintf (f, " ");
      fprintf (f, "%d:(", node->preorder_num);
      print_hard_reg_set (f, node->hard_regs->set, false);
      fprintf (f, ")@%" PRId64"\n", node->hard_regs->cost);
      print_hard_regs_subforest (f, node->first, level + 1);
    }
}
/* Print the whole allocno hard register forest (all roots) to F.  */
static void
print_hard_regs_forest (FILE *f)
{
  fprintf (f, " Hard reg set forest:\n");
  print_hard_regs_subforest (f, hard_regs_roots, 1);
}
/* Print the allocno hard register forest to stderr.  External
   (non-static) so it can be called from a debugger.  */
void
ira_debug_hard_regs_forest (void)
{
  print_hard_regs_forest (stderr);
}
/* Remove unused allocno hard registers nodes from the forest given by
   its *ROOTS.  A node with used_p clear is deleted and its children
   (if any) are spliced into its place on the current level; those
   promoted children are then revisited because NEXT is reset to the
   start of the spliced-in sublist.  */
static void
remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
{
  allocno_hard_regs_node_t node, prev, next, last;

  for (prev = NULL, node = *roots; node != NULL; node = next)
    {
      next = node->next;
      if (node->used_p)
	{
	  /* Keep this node; clean its subtree and advance.  */
	  remove_unused_allocno_hard_regs_nodes (&node->first);
	  prev = node;
	}
      else
	{
	  /* Find the last child so the whole child list can be
	     spliced into this level in one go.  */
	  for (last = node->first;
	       last != NULL && last->next != NULL;
	       last = last->next)
	    ;
	  if (last != NULL)
	    {
	      /* Replace NODE by its children on this level.  */
	      if (prev == NULL)
		*roots = node->first;
	      else
		prev->next = node->first;
	      if (next != NULL)
		next->prev = last;
	      last->next = next;
	      /* Re-examine the promoted children next.  */
	      next = node->first;
	    }
	  else
	    {
	      /* Childless unused node: simply unlink it.  */
	      if (prev == NULL)
		*roots = next;
	      else
		prev->next = next;
	      if (next != NULL)
		next->prev = prev;
	    }
	  ira_free (node);
	}
    }
}
621 /* Set up fields preorder_num starting with START_NUM in all allocno
622 hard registers nodes in forest given by FIRST. Return biggest set
623 PREORDER_NUM increased by 1. */
624 static int
625 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
626 allocno_hard_regs_node_t parent,
627 int start_num)
629 allocno_hard_regs_node_t node;
631 for (node = first; node != NULL; node = node->next)
633 node->preorder_num = start_num++;
634 node->parent = parent;
635 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
636 start_num);
638 return start_num;
/* Number of allocno hard registers nodes in the forest.  */
static int allocno_hard_regs_nodes_num;

/* Table: preorder number of allocno hard registers node in the forest
   -> the allocno hard registers node.  */
static allocno_hard_regs_node_t *allocno_hard_regs_nodes;

/* See below.  */
typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;

/* The structure is used to describe all subnodes (not only immediate
   ones) in the mentioned above tree for given allocno hard register
   node.  The usage of such data accelerates calculation of
   colorability of given allocno.  */
struct allocno_hard_regs_subnode
{
  /* The conflict size of conflicting allocnos whose hard register
     sets are equal sets (plus supersets if given node is given
     allocno hard registers node) of one in the given node.  */
  int left_conflict_size;
  /* The summary conflict size of conflicting allocnos whose hard
     register sets are strict subsets of one in the given node.
     Overall conflict size is
     left_conflict_subnodes_size
       + MIN (max_node_impact - left_conflict_subnodes_size,
	      left_conflict_size)
     (shorts suffice: bounded by the number of hard registers).  */
  short left_conflict_subnodes_size;
  short max_node_impact;
};

/* Container for hard regs subnodes of all allocnos, indexed through
   each allocno's hard_regs_subnodes_start.  */
static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;

/* Table (preorder number of allocno hard registers node in the
   forest, preorder number of allocno hard registers subnode) -> index
   of the subnode relative to the node.  -1 if it is not a
   subnode.  */
static int *allocno_hard_regs_subnode_index;
/* Setup arrays ALLOCNO_HARD_REGS_NODES and
   ALLOCNO_HARD_REGS_SUBNODE_INDEX for the forest level FIRST and,
   recursively, all levels below it.  For each node, walk up to every
   ancestor and record this node's offset relative to that ancestor
   (valid because preorder numbers within a subtree are contiguous).  */
static void
setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
{
  allocno_hard_regs_node_t node, parent;
  int index;

  for (node = first; node != NULL; node = node->next)
    {
      allocno_hard_regs_nodes[node->preorder_num] = node;
      for (parent = node; parent != NULL; parent = parent->parent)
	{
	  /* Row of the 2-D (flattened) table for PARENT.  */
	  index = parent->preorder_num * allocno_hard_regs_nodes_num;
	  allocno_hard_regs_subnode_index[index + node->preorder_num]
	    = node->preorder_num - parent->preorder_num;
	}
      setup_allocno_hard_regs_subnode_index (node->first);
    }
}
702 /* Count all allocno hard registers nodes in tree ROOT. */
703 static int
704 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
706 int len = 1;
708 for (root = root->first; root != NULL; root = root->next)
709 len += get_allocno_hard_regs_subnodes_num (root);
710 return len;
/* Build the forest of allocno hard registers nodes and assign each
   allocno a node from the forest.

   Phases: (1) seed the forest with one node per allocatable hard
   register; (2) collect the profitable-set of every coloring allocno
   plus the full allocatable set, sort by cost, and merge each into
   the forest; (3) assign each allocno the common ancestor of the
   nodes covering its profitable set and mark it used; (4) prune
   unused nodes, enumerate the survivors in preorder, and build the
   subnode-index table and per-allocno subnode arrays.  */
static void
form_allocno_hard_regs_nodes_forest (void)
{
  unsigned int i, j, size, len;
  int start;
  ira_allocno_t a;
  allocno_hard_regs_t hv;
  bitmap_iterator bi;
  HARD_REG_SET temp;
  allocno_hard_regs_node_t node, allocno_hard_regs_node;
  allocno_color_data_t allocno_data;

  node_check_tick = 0;
  init_allocno_hard_regs ();
  hard_regs_roots = NULL;
  hard_regs_node_vec.create (100);
  /* Phase 1: one singleton node per allocatable hard register.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
      {
	CLEAR_HARD_REG_SET (temp);
	SET_HARD_REG_BIT (temp, i);
	hv = add_allocno_hard_regs (temp, 0);
	node = create_new_allocno_hard_regs_node (hv);
	add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
      }
  start = allocno_hard_regs_vec.length ();
  /* Phase 2: record each allocno's profitable set weighted by its
     spill cost (memory cost minus class cost).  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);

      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
	continue;
      hv = (add_allocno_hard_regs
	    (allocno_data->profitable_hard_regs,
	     ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
    }
  /* Also add the full set of allocatable registers so the forest has
     a single top node.  */
  SET_HARD_REG_SET (temp);
  AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
  add_allocno_hard_regs (temp, 0);
  /* Merge the costliest sets first so expensive allocnos shape the
     forest.  */
  qsort (allocno_hard_regs_vec.address () + start,
	 allocno_hard_regs_vec.length () - start,
	 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
  for (i = start;
       allocno_hard_regs_vec.iterate (i, &hv);
       i++)
    {
      add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
      ira_assert (hard_regs_node_vec.length () == 0);
    }
  /* We need to set up parent fields for right work of
     first_common_ancestor_node.  */
  setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
  /* Phase 3: map each allocno to the common ancestor covering its
     profitable set, and mark that node used.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);
      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
	continue;
      hard_regs_node_vec.truncate (0);
      collect_allocno_hard_regs_cover (hard_regs_roots,
				       allocno_data->profitable_hard_regs);
      allocno_hard_regs_node = NULL;
      for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
	allocno_hard_regs_node
	  = (j == 0
	     ? node
	     : first_common_ancestor_node (node, allocno_hard_regs_node));
      /* That is a temporary storage.  */
      allocno_hard_regs_node->used_p = true;
      allocno_data->hard_regs_node = allocno_hard_regs_node;
    }
  /* After adding the all-allocatable-regs set above there is exactly
     one root; keep it.  */
  ira_assert (hard_regs_roots->next == NULL);
  hard_regs_roots->used_p = true;
  remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
  /* Phase 4: preorder enumeration and lookup tables.  */
  allocno_hard_regs_nodes_num
    = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
  allocno_hard_regs_nodes
    = ((allocno_hard_regs_node_t *)
       ira_allocate (allocno_hard_regs_nodes_num
		     * sizeof (allocno_hard_regs_node_t)));
  size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
  allocno_hard_regs_subnode_index
    = (int *) ira_allocate (size * sizeof (int));
  for (i = 0; i < size; i++)
    allocno_hard_regs_subnode_index[i] = -1;
  setup_allocno_hard_regs_subnode_index (hard_regs_roots);
  /* Reserve a contiguous slice of the subnodes array for each
     allocno's subtree.  */
  start = 0;
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      allocno_data = ALLOCNO_COLOR_DATA (a);
      if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
	continue;
      len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
      allocno_data->hard_regs_subnodes_start = start;
      allocno_data->hard_regs_subnodes_num = len;
      start += len;
    }
  allocno_hard_regs_subnodes
    = ((allocno_hard_regs_subnode_t)
       ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
  hard_regs_node_vec.release ();
}
820 /* Free tree of allocno hard registers nodes given by its ROOT. */
821 static void
822 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
824 allocno_hard_regs_node_t child, next;
826 for (child = root->first; child != NULL; child = next)
828 next = child->next;
829 finish_allocno_hard_regs_nodes_tree (child);
831 ira_free (root);
/* Finish work with the forest of allocno hard registers nodes: free
   the per-allocno subnode array, every tree in the forest, the
   preorder lookup tables, and finally the hard-regs hash/vector.  */
static void
finish_allocno_hard_regs_nodes_forest (void)
{
  allocno_hard_regs_node_t node, next;

  ira_free (allocno_hard_regs_subnodes);
  for (node = hard_regs_roots; node != NULL; node = next)
    {
      /* Save NEXT before the node is freed.  */
      next = node->next;
      finish_allocno_hard_regs_nodes_tree (node);
    }
  ira_free (allocno_hard_regs_nodes);
  ira_free (allocno_hard_regs_subnode_index);
  finish_allocno_hard_regs ();
}
/* Set up left conflict sizes and left conflict subnodes sizes of hard
   registers subnodes of allocno A.  Return TRUE if allocno A is
   trivially colorable.

   Pass 1 accumulates, per forest node, how many registers the
   still-in-graph conflicting allocnos may claim.  Pass 2 copies those
   per-node totals into A's subnode array and computes each subnode's
   impact bound.  Pass 3 propagates subtree sums bottom-up (reverse
   preorder guarantees children precede parents).  */
static bool
setup_left_conflict_sizes_p (ira_allocno_t a)
{
  int i, k, nobj, start;
  int conflict_size, left_conflict_subnodes_size, node_preorder_num;
  allocno_color_data_t data;
  HARD_REG_SET profitable_hard_regs;
  allocno_hard_regs_subnode_t subnodes;
  allocno_hard_regs_node_t node;
  HARD_REG_SET node_set;

  nobj = ALLOCNO_NUM_OBJECTS (a);
  data = ALLOCNO_COLOR_DATA (a);
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  COPY_HARD_REG_SET (node_set, node->hard_regs->set);
  /* Fresh tick: nodes with a stale tick have conflict_size
     conceptually zero, so no global reset is needed.  */
  node_check_tick++;
  for (k = 0; k < nobj; k++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, k);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  int size;
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	  allocno_hard_regs_node_t conflict_node, temp_node;
	  HARD_REG_SET conflict_node_set;
	  allocno_color_data_t conflict_data;

	  conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
	  /* Ignore conflicts already removed from the graph or with
	     disjoint profitable registers.  */
	  if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
	      || ! hard_reg_set_intersect_p (profitable_hard_regs,
					     conflict_data
					     ->profitable_hard_regs))
	    continue;
	  conflict_node = conflict_data->hard_regs_node;
	  COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
	  /* Charge the conflict to the smaller of the two nodes: forest
	     structure guarantees one set contains the other.  */
	  if (hard_reg_set_subset_p (node_set, conflict_node_set))
	    temp_node = node;
	  else
	    {
	      ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
	      temp_node = conflict_node;
	    }
	  if (temp_node->check != node_check_tick)
	    {
	      /* First touch this tick: lazily reset.  */
	      temp_node->check = node_check_tick;
	      temp_node->conflict_size = 0;
	    }
	  size = (ira_reg_class_max_nregs
		  [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
	  if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
	    /* We will deal with the subwords individually.  */
	    size = 1;
	  temp_node->conflict_size += size;
	}
    }
  /* Pass 2: fill A's subnode records.  Subnodes of A's node occupy
     consecutive preorder numbers starting at node_preorder_num.  */
  for (i = 0; i < data->hard_regs_subnodes_num; i++)
    {
      allocno_hard_regs_node_t temp_node;

      temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
      ira_assert (temp_node->preorder_num == i + node_preorder_num);
      subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
					? 0 : temp_node->conflict_size);
      if (hard_reg_set_subset_p (temp_node->hard_regs->set,
				 profitable_hard_regs))
	subnodes[i].max_node_impact = temp_node->hard_regs_num;
      else
	{
	  /* Only part of the node's registers are profitable for A:
	     count the intersection by hand.  */
	  HARD_REG_SET temp_set;
	  int j, n, hard_regno;
	  enum reg_class aclass;

	  COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
	  AND_HARD_REG_SET (temp_set, profitable_hard_regs);
	  aclass = ALLOCNO_CLASS (a);
	  for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (TEST_HARD_REG_BIT (temp_set, hard_regno))
		n++;
	    }
	  subnodes[i].max_node_impact = n;
	}
      subnodes[i].left_conflict_subnodes_size = 0;
    }
  /* Pass 3: bottom-up accumulation of subtree conflict sizes.  */
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  for (i = data->hard_regs_subnodes_num - 1; i > 0; i--)
    {
      int size, parent_i;
      allocno_hard_regs_node_t parent;

      size = (subnodes[i].left_conflict_subnodes_size
	      + MIN (subnodes[i].max_node_impact
		     - subnodes[i].left_conflict_subnodes_size,
		     subnodes[i].left_conflict_size));
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      gcc_checking_assert(parent);
      parent_i
	= allocno_hard_regs_subnode_index[start + parent->preorder_num];
      gcc_checking_assert(parent_i >= 0);
      subnodes[parent_i].left_conflict_subnodes_size += size;
    }
  left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
  conflict_size
    = (left_conflict_subnodes_size
       + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
	      subnodes[0].left_conflict_size));
  /* A is trivially colorable when the conflicts plus A's own register
     need fit in its available registers.  */
  conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
  data->colorable_p = conflict_size <= data->available_regs_num;
  return data->colorable_p;
}
/* Update left conflict sizes of hard registers subnodes of allocno A
   after removing allocno REMOVED_A with SIZE from the conflict graph.
   Return TRUE if A is trivially colorable.

   Subtracts SIZE at the subnode charged with the conflict and then
   propagates the resulting decrease up the subnode chain, stopping as
   soon as a node's overall conflict size is unchanged.  */
static bool
update_left_conflict_sizes_p (ira_allocno_t a,
			      ira_allocno_t removed_a, int size)
{
  int i, conflict_size, before_conflict_size, diff, start;
  int node_preorder_num, parent_i;
  allocno_hard_regs_node_t node, removed_node, parent;
  allocno_hard_regs_subnode_t subnodes;
  allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);

  ira_assert (! data->colorable_p);
  node = data->hard_regs_node;
  node_preorder_num = node->preorder_num;
  removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
  /* Forest structure: one node's set must contain the other's.  */
  ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
				     node->hard_regs->set)
	      || hard_reg_set_subset_p (node->hard_regs->set,
					removed_node->hard_regs->set));
  start = node_preorder_num * allocno_hard_regs_nodes_num;
  i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
  if (i < 0)
    /* REMOVED_NODE is not within A's subtree (it is an ancestor):
       the conflict was charged to A's own node, index 0.  */
    i = 0;
  subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
  before_conflict_size
    = (subnodes[i].left_conflict_subnodes_size
       + MIN (subnodes[i].max_node_impact
	      - subnodes[i].left_conflict_subnodes_size,
	      subnodes[i].left_conflict_size));
  subnodes[i].left_conflict_size -= size;
  /* Walk up, propagating how much each node's effective conflict
     size shrank into the parent's subtree sum.  */
  for (;;)
    {
      conflict_size
	= (subnodes[i].left_conflict_subnodes_size
	   + MIN (subnodes[i].max_node_impact
		  - subnodes[i].left_conflict_subnodes_size,
		  subnodes[i].left_conflict_size));
      if ((diff = before_conflict_size - conflict_size) == 0)
	break;
      ira_assert (conflict_size < before_conflict_size);
      parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
      if (parent == NULL)
	break;
      parent_i
	= allocno_hard_regs_subnode_index[start + parent->preorder_num];
      if (parent_i < 0)
	break;
      i = parent_i;
      before_conflict_size
	= (subnodes[i].left_conflict_subnodes_size
	   + MIN (subnodes[i].max_node_impact
		  - subnodes[i].left_conflict_subnodes_size,
		  subnodes[i].left_conflict_size));
      subnodes[i].left_conflict_subnodes_size -= diff;
    }
  /* Colorable only if the propagation reached the top node and the
     remaining conflicts plus A's own need fit.  */
  if (i != 0
      || (conflict_size
	  + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
	  > data->available_regs_num))
    return false;
  data->colorable_p = true;
  return true;
}
1038 /* Return true if allocno A has empty profitable hard regs. */
1039 static bool
1040 empty_profitable_hard_regs (ira_allocno_t a)
1042 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1044 return hard_reg_set_empty_p (data->profitable_hard_regs);
/* Set up profitable hard registers for each allocno being colored.
   Three passes: (1) seed each allocno's profitable set from its class
   and explicit conflicts, (2) remove hard regs already taken by
   assigned conflicting objects, (3) remove hard regs costlier than
   memory and tighten the updated class cost.  */
static void
setup_profitable_hard_regs (void)
{
  unsigned int i;
  int j, k, nobj, hard_regno, nregs, class_size;
  ira_allocno_t a;
  bitmap_iterator bi;
  enum reg_class aclass;
  machine_mode mode;
  allocno_color_data_t data;

  /* Initial set up from allocno classes and explicitly conflicting
     hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      data = ALLOCNO_COLOR_DATA (a);
      /* If memory is already cheaper than any hard reg for A, no hard
	 reg is profitable.  */
      if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
	  && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a))
	CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      else
	{
	  mode = ALLOCNO_MODE (a);
	  COPY_HARD_REG_SET (data->profitable_hard_regs,
			     ira_useful_class_mode_regs[aclass][mode]);
	  nobj = ALLOCNO_NUM_OBJECTS (a);
	  for (k = 0; k < nobj; k++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, k);

	      AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
				      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
	    }
	}
    }
  /* Exclude hard regs already assigned for conflicting objects.  */
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
	  || ! ALLOCNO_ASSIGNED_P (a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
	continue;
      mode = ALLOCNO_MODE (a);
      nregs = hard_regno_nregs[hard_regno][mode];
      nobj = ALLOCNO_NUM_OBJECTS (a);
      for (k = 0; k < nobj; k++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, k);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      /* We can process the conflict allocno repeatedly with
		 the same result.  */
	      if (nregs == nobj && nregs > 1)
		{
		  /* One object per word of a multi-word value: only
		     the word's own hard reg conflicts, with the word
		     index reversed on big-endian word layouts.  */
		  int num = OBJECT_SUBWORD (conflict_obj);

		  if (REG_WORDS_BIG_ENDIAN)
		    CLEAR_HARD_REG_BIT
		      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		       hard_regno + nobj - num - 1);
		  else
		    CLEAR_HARD_REG_BIT
		      (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		       hard_regno + num);
		}
	      else
		/* Otherwise the whole register span of the assigned
		   value conflicts.  */
		AND_COMPL_HARD_REG_SET
		  (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
		   ira_reg_mode_hard_regset[hard_regno][mode]);
	    }
	}
    }
  /* Exclude too costly hard regs.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int min_cost = INT_MAX;
      int *costs;

      a = ira_allocnos[i];
      if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
	  || empty_profitable_hard_regs (a))
	continue;
      data = ALLOCNO_COLOR_DATA (a);
      mode = ALLOCNO_MODE (a);
      if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
	  || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
	{
	  class_size = ira_class_hard_regs_num[aclass];
	  for (j = 0; j < class_size; j++)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
				       hard_regno))
		continue;
	      /* A hard reg costlier than memory is not profitable.  */
	      if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j])
		CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
				    hard_regno);
	      else if (min_cost > costs[j])
		min_cost = costs[j];
	    }
	}
      else if (ALLOCNO_UPDATED_MEMORY_COST (a)
	       < ALLOCNO_UPDATED_CLASS_COST (a))
	CLEAR_HARD_REG_SET (data->profitable_hard_regs);
      /* min_cost stays INT_MAX when no per-reg cost vector exists, in
	 which case this does not lower the class cost.  */
      if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
	ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
    }
}
/* This page contains functions used to choose hard registers for
   allocnos.  */

/* Pool for update cost records (see struct update_cost_record);
   records are allocated in batches of 100.  */
static pool_allocator<update_cost_record> update_cost_record_pool
  ("update cost records", 100);
1175 /* Return new update cost record with given params. */
1176 static struct update_cost_record *
1177 get_update_cost_record (int hard_regno, int divisor,
1178 struct update_cost_record *next)
1180 struct update_cost_record *record;
1182 record = update_cost_record_pool.allocate ();
1183 record->hard_regno = hard_regno;
1184 record->divisor = divisor;
1185 record->next = next;
1186 return record;
1189 /* Free memory for all records in LIST. */
1190 static void
1191 free_update_cost_record_list (struct update_cost_record *list)
1193 struct update_cost_record *next;
1195 while (list != NULL)
1197 next = list->next;
1198 update_cost_record_pool.remove (list);
1199 list = next;
1203 /* Free memory allocated for all update cost records. */
1204 static void
1205 finish_update_cost_records (void)
1207 update_cost_record_pool.release ();
/* Array whose element value is TRUE if the corresponding hard
   register was already allocated for an allocno.  */
static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];

/* Describes one element in a queue of allocnos whose costs need to be
   updated.  Each allocno in the queue is known to have an allocno
   class.  */
struct update_cost_queue_elem
{
  /* This element is in the queue iff CHECK == update_cost_check.  */
  int check;

  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
     connecting this allocno to the one being allocated.  */
  int divisor;

  /* Allocno from which we are chaining costs of connected allocnos.
     It is used to avoid going back in the graph of allocnos connected
     by copies.  */
  ira_allocno_t from;

  /* The next allocno in the queue, or null if this is the last element.  */
  ira_allocno_t next;
};

/* The first element in a queue of allocnos whose copy costs need to be
   updated.  Null if the queue is empty.  */
static ira_allocno_t update_cost_queue;

/* The last element in the queue described by update_cost_queue.
   Not valid if update_cost_queue is null.  */
static struct update_cost_queue_elem *update_cost_queue_tail;

/* A pool of elements in the queue described by update_cost_queue.
   Elements are indexed by ALLOCNO_NUM.  */
static struct update_cost_queue_elem *update_cost_queue_elems;

/* The current value of update_costs_from_copies call count; bumping
   it lazily invalidates every queue element (see the CHECK field).  */
static int update_cost_check;
1250 /* Allocate and initialize data necessary for function
1251 update_costs_from_copies. */
1252 static void
1253 initiate_cost_update (void)
1255 size_t size;
1257 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1258 update_cost_queue_elems
1259 = (struct update_cost_queue_elem *) ira_allocate (size);
1260 memset (update_cost_queue_elems, 0, size);
1261 update_cost_check = 0;
1264 /* Deallocate data used by function update_costs_from_copies. */
1265 static void
1266 finish_cost_update (void)
1268 ira_free (update_cost_queue_elems);
1269 finish_update_cost_records ();
/* When we traverse allocnos to update hard register costs, the cost
   divisor will be multiplied by the following macro value for each
   hop from given allocno to directly connected allocnos.  */
#define COST_HOP_DIVISOR 4
1277 /* Start a new cost-updating pass. */
1278 static void
1279 start_update_cost (void)
1281 update_cost_check++;
1282 update_cost_queue = NULL;
1285 /* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1286 ALLOCNO is already in the queue, or has NO_REGS class. */
1287 static inline void
1288 queue_update_cost (ira_allocno_t allocno, ira_allocno_t from, int divisor)
1290 struct update_cost_queue_elem *elem;
1292 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1293 if (elem->check != update_cost_check
1294 && ALLOCNO_CLASS (allocno) != NO_REGS)
1296 elem->check = update_cost_check;
1297 elem->from = from;
1298 elem->divisor = divisor;
1299 elem->next = NULL;
1300 if (update_cost_queue == NULL)
1301 update_cost_queue = allocno;
1302 else
1303 update_cost_queue_tail->next = allocno;
1304 update_cost_queue_tail = elem;
1308 /* Try to remove the first element from update_cost_queue. Return
1309 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1310 *DIVISOR) describe the removed element. */
1311 static inline bool
1312 get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *from, int *divisor)
1314 struct update_cost_queue_elem *elem;
1316 if (update_cost_queue == NULL)
1317 return false;
1319 *allocno = update_cost_queue;
1320 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1321 *from = elem->from;
1322 *divisor = elem->divisor;
1323 update_cost_queue = elem->next;
1324 return true;
1327 /* Increase costs of HARD_REGNO by UPDATE_COST for ALLOCNO. Return
1328 true if we really modified the cost. */
1329 static bool
1330 update_allocno_cost (ira_allocno_t allocno, int hard_regno, int update_cost)
1332 int i;
1333 enum reg_class aclass = ALLOCNO_CLASS (allocno);
1335 i = ira_class_hard_reg_index[aclass][hard_regno];
1336 if (i < 0)
1337 return false;
1338 ira_allocate_and_set_or_copy_costs
1339 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno), aclass,
1340 ALLOCNO_UPDATED_CLASS_COST (allocno),
1341 ALLOCNO_HARD_REG_COSTS (allocno));
1342 ira_allocate_and_set_or_copy_costs
1343 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno),
1344 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno));
1345 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)[i] += update_cost;
1346 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)[i] += update_cost;
1347 return true;
/* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
   by copies to ALLOCNO to increase chances to remove some copies as
   the result of subsequent assignment.  Record cost updates if
   RECORD_P is true.  Works as a breadth-first walk over the copy
   graph using update_cost_queue; each hop multiplies the divisor so
   the effect fades with distance.  */
static void
update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
			   int divisor, bool decr_p, bool record_p)
{
  int cost, update_cost;
  machine_mode mode;
  enum reg_class rclass, aclass;
  ira_allocno_t another_allocno, from = NULL;
  ira_copy_t cp, next_cp;

  rclass = REGNO_REG_CLASS (hard_regno);
  do
    {
      mode = ALLOCNO_MODE (allocno);
      ira_init_register_move_cost_if_necessary (mode);
      for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
	{
	  /* Find the copy partner and advance along ALLOCNO's side of
	     the copy list.  */
	  if (cp->first == allocno)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      another_allocno = cp->second;
	    }
	  else if (cp->second == allocno)
	    {
	      next_cp = cp->next_second_allocno_copy;
	      another_allocno = cp->first;
	    }
	  else
	    gcc_unreachable ();

	  /* Don't walk back to the allocno we came from.  */
	  if (another_allocno == from)
	    continue;

	  aclass = ALLOCNO_CLASS (another_allocno);
	  if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
				   hard_regno)
	      || ALLOCNO_ASSIGNED_P (another_allocno))
	    continue;

	  /* Use the move cost in the direction of the copy.  */
	  cost = (cp->second == allocno
		  ? ira_register_move_cost[mode][rclass][aclass]
		  : ira_register_move_cost[mode][aclass][rclass]);
	  if (decr_p)
	    cost = -cost;

	  /* Scale by copy frequency and attenuate by distance.  */
	  update_cost = cp->freq * cost / divisor;
	  if (update_cost == 0)
	    continue;

	  if (! update_allocno_cost (another_allocno, hard_regno, update_cost))
	    continue;
	  queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
	  if (record_p && ALLOCNO_COLOR_DATA (another_allocno) != NULL)
	    ALLOCNO_COLOR_DATA (another_allocno)->update_cost_records
	      = get_update_cost_record (hard_regno, divisor,
					ALLOCNO_COLOR_DATA (another_allocno)
					->update_cost_records);
	}
    }
  while (get_next_update_cost (&allocno, &from, &divisor));
}
1416 /* Decrease preferred ALLOCNO hard register costs and costs of
1417 allocnos connected to ALLOCNO through copy. */
1418 static void
1419 update_costs_from_prefs (ira_allocno_t allocno)
1421 ira_pref_t pref;
1423 start_update_cost ();
1424 for (pref = ALLOCNO_PREFS (allocno); pref != NULL; pref = pref->next_pref)
1425 update_costs_from_allocno (allocno, pref->hard_regno,
1426 COST_HOP_DIVISOR, true, true);
1429 /* Update (decrease if DECR_P) the cost of allocnos connected to
1430 ALLOCNO through copies to increase chances to remove some copies as
1431 the result of subsequent assignment. ALLOCNO was just assigned to
1432 a hard register. Record cost updates if RECORD_P is true. */
1433 static void
1434 update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
1436 int hard_regno;
1438 hard_regno = ALLOCNO_HARD_REGNO (allocno);
1439 ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1440 start_update_cost ();
1441 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
1444 /* Restore costs of allocnos connected to ALLOCNO by copies as it was
1445 before updating costs of these allocnos from given allocno. This
1446 is a wise thing to do as if given allocno did not get an expected
1447 hard reg, using smaller cost of the hard reg for allocnos connected
1448 by copies to given allocno becomes actually misleading. Free all
1449 update cost records for ALLOCNO as we don't need them anymore. */
1450 static void
1451 restore_costs_from_copies (ira_allocno_t allocno)
1453 struct update_cost_record *records, *curr;
1455 if (ALLOCNO_COLOR_DATA (allocno) == NULL)
1456 return;
1457 records = ALLOCNO_COLOR_DATA (allocno)->update_cost_records;
1458 start_update_cost ();
1459 for (curr = records; curr != NULL; curr = curr->next)
1460 update_costs_from_allocno (allocno, curr->hard_regno,
1461 curr->divisor, true, false);
1462 free_update_cost_record_list (records);
1463 ALLOCNO_COLOR_DATA (allocno)->update_cost_records = NULL;
/* This function updates COSTS (decrease if DECR_P) for hard_registers
   of ACLASS by conflict costs of the unassigned allocnos
   connected by copies with allocnos in update_cost_queue.  This
   update increases chances to remove some copies.  Processing stops
   spreading after a few hops (see the divisor limit below).  */
static void
update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
				  bool decr_p)
{
  int i, cost, class_size, freq, mult, div, divisor;
  int index, hard_regno;
  int *conflict_costs;
  bool cont_p;
  enum reg_class another_aclass;
  ira_allocno_t allocno, another_allocno, from;
  ira_copy_t cp, next_cp;

  while (get_next_update_cost (&allocno, &from, &divisor))
    for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
      {
	/* Find the copy partner and advance along ALLOCNO's side of
	   the copy list.  */
	if (cp->first == allocno)
	  {
	    next_cp = cp->next_first_allocno_copy;
	    another_allocno = cp->second;
	  }
	else if (cp->second == allocno)
	  {
	    next_cp = cp->next_second_allocno_copy;
	    another_allocno = cp->first;
	  }
	else
	  gcc_unreachable ();

	/* Don't walk back to the allocno we came from.  */
	if (another_allocno == from)
	  continue;

	another_aclass = ALLOCNO_CLASS (another_allocno);
	if (! ira_reg_classes_intersect_p[aclass][another_aclass]
	    || ALLOCNO_ASSIGNED_P (another_allocno)
	    || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
	  continue;
	class_size = ira_class_hard_regs_num[another_aclass];
	ira_allocate_and_copy_costs
	  (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
	   another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
	conflict_costs
	  = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
	if (conflict_costs == NULL)
	  cont_p = true;
	else
	  {
	    /* Scale each conflict cost by the copy frequency and
	       attenuate by the partner's own frequency and the hop
	       divisor.  */
	    mult = cp->freq;
	    freq = ALLOCNO_FREQ (another_allocno);
	    if (freq == 0)
	      freq = 1;
	    div = freq * divisor;
	    cont_p = false;
	    for (i = class_size - 1; i >= 0; i--)
	      {
		hard_regno = ira_class_hard_regs[another_aclass][i];
		ira_assert (hard_regno >= 0);
		index = ira_class_hard_reg_index[aclass][hard_regno];
		if (index < 0)
		  continue;
		cost = (int) ((unsigned) conflict_costs [i] * mult) / div;
		if (cost == 0)
		  continue;
		cont_p = true;
		if (decr_p)
		  cost = -cost;
		costs[index] += cost;
	      }
	  }
	/* Probably 5 hops will be enough.  */
	if (cont_p
	    && divisor <= (COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR
			   * COST_HOP_DIVISOR))
	  queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
      }
}
1548 /* Set up conflicting (through CONFLICT_REGS) for each object of
1549 allocno A and the start allocno profitable regs (through
1550 START_PROFITABLE_REGS). Remember that the start profitable regs
1551 exclude hard regs which can not hold value of mode of allocno A.
1552 This covers mostly cases when multi-register value should be
1553 aligned. */
1554 static inline void
1555 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1556 HARD_REG_SET *conflict_regs,
1557 HARD_REG_SET *start_profitable_regs)
1559 int i, nwords;
1560 ira_object_t obj;
1562 nwords = ALLOCNO_NUM_OBJECTS (a);
1563 for (i = 0; i < nwords; i++)
1565 obj = ALLOCNO_OBJECT (a, i);
1566 COPY_HARD_REG_SET (conflict_regs[i],
1567 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1569 if (retry_p)
1571 COPY_HARD_REG_SET (*start_profitable_regs,
1572 reg_class_contents[ALLOCNO_CLASS (a)]);
1573 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1574 ira_prohibited_class_mode_regs
1575 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1577 else
1578 COPY_HARD_REG_SET (*start_profitable_regs,
1579 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
/* Return true if HARD_REGNO is ok for assigning to allocno A with
   PROFITABLE_REGS and whose objects have CONFLICT_REGS.  Each hard
   register of the span must be free of conflicts in the per-word
   conflict sets.  */
static inline bool
check_hard_reg_p (ira_allocno_t a, int hard_regno,
		  HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
{
  int j, nwords, nregs;
  enum reg_class aclass;
  machine_mode mode;

  aclass = ALLOCNO_CLASS (a);
  mode = ALLOCNO_MODE (a);
  if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
			 hard_regno))
    return false;
  /* Checking only profitable hard regs.  */
  if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
    return false;
  nregs = hard_regno_nregs[hard_regno][mode];
  nwords = ALLOCNO_NUM_OBJECTS (a);
  for (j = 0; j < nregs; j++)
    {
      int k;
      int set_to_test_start = 0, set_to_test_end = nwords;

      /* When the value has one object per register, only the matching
	 word's conflict set needs checking; the word index is
	 reversed on big-endian word layouts.  Otherwise every word's
	 conflict set must be clear of the register.  */
      if (nregs == nwords)
	{
	  if (REG_WORDS_BIG_ENDIAN)
	    set_to_test_start = nwords - j - 1;
	  else
	    set_to_test_start = j;
	  set_to_test_end = set_to_test_start + 1;
	}
      for (k = set_to_test_start; k < set_to_test_end; k++)
	if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
	  break;
      if (k != set_to_test_end)
	break;
    }
  /* True iff no conflict was found for any register of the span.  */
  return j == nregs;
}
1624 /* Return number of registers needed to be saved and restored at
1625 function prologue/epilogue if we allocate HARD_REGNO to hold value
1626 of MODE. */
1627 static int
1628 calculate_saved_nregs (int hard_regno, machine_mode mode)
1630 int i;
1631 int nregs = 0;
1633 ira_assert (hard_regno >= 0);
1634 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1635 if (!allocated_hardreg_p[hard_regno + i]
1636 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1637 && !LOCAL_REGNO (hard_regno + i))
1638 nregs++;
1639 return nregs;
1642 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1643 that the function called from function
1644 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1645 this case some allocno data are not defined or updated and we
1646 should not touch these data. The function returns true if we
1647 managed to assign a hard register to the allocno.
1649 To assign a hard register, first of all we calculate all conflict
1650 hard registers which can come from conflicting allocnos with
1651 already assigned hard registers. After that we find first free
1652 hard register with the minimal cost. During hard register cost
1653 calculation we take conflict hard register costs into account to
1654 give a chance for conflicting allocnos to get a better hard
1655 register in the future.
1657 If the best hard register cost is bigger than cost of memory usage
1658 for the allocno, we don't assign a hard register to given allocno
1659 at all.
1661 If we assign a hard register to the allocno, we update costs of the
1662 hard register for allocnos connected by copies to improve a chance
1663 to coalesce insns represented by the copies when we assign hard
1664 registers to the allocnos connected by the copies. */
1665 static bool
1666 assign_hard_reg (ira_allocno_t a, bool retry_p)
1668 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1669 int i, j, hard_regno, best_hard_regno, class_size;
1670 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1671 int *a_costs;
1672 enum reg_class aclass;
1673 machine_mode mode;
1674 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
1675 int saved_nregs;
1676 enum reg_class rclass;
1677 int add_cost;
1678 #ifdef STACK_REGS
1679 bool no_stack_reg_p;
1680 #endif
1682 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1683 get_conflict_and_start_profitable_regs (a, retry_p,
1684 conflicting_regs,
1685 &profitable_hard_regs);
1686 aclass = ALLOCNO_CLASS (a);
1687 class_size = ira_class_hard_regs_num[aclass];
1688 best_hard_regno = -1;
1689 memset (full_costs, 0, sizeof (int) * class_size);
1690 mem_cost = 0;
1691 memset (costs, 0, sizeof (int) * class_size);
1692 memset (full_costs, 0, sizeof (int) * class_size);
1693 #ifdef STACK_REGS
1694 no_stack_reg_p = false;
1695 #endif
1696 if (! retry_p)
1697 start_update_cost ();
1698 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1700 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1701 aclass, ALLOCNO_HARD_REG_COSTS (a));
1702 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
1703 #ifdef STACK_REGS
1704 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
1705 #endif
1706 cost = ALLOCNO_UPDATED_CLASS_COST (a);
1707 for (i = 0; i < class_size; i++)
1708 if (a_costs != NULL)
1710 costs[i] += a_costs[i];
1711 full_costs[i] += a_costs[i];
1713 else
1715 costs[i] += cost;
1716 full_costs[i] += cost;
1718 nwords = ALLOCNO_NUM_OBJECTS (a);
1719 curr_allocno_process++;
1720 for (word = 0; word < nwords; word++)
1722 ira_object_t conflict_obj;
1723 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1724 ira_object_conflict_iterator oci;
1726 /* Take preferences of conflicting allocnos into account. */
1727 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1729 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1730 enum reg_class conflict_aclass;
1731 allocno_color_data_t data = ALLOCNO_COLOR_DATA (conflict_a);
1733 /* Reload can give another class so we need to check all
1734 allocnos. */
1735 if (!retry_p
1736 && (!bitmap_bit_p (consideration_allocno_bitmap,
1737 ALLOCNO_NUM (conflict_a))
1738 || ((!ALLOCNO_ASSIGNED_P (conflict_a)
1739 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1740 && !(hard_reg_set_intersect_p
1741 (profitable_hard_regs,
1742 ALLOCNO_COLOR_DATA
1743 (conflict_a)->profitable_hard_regs)))))
1744 continue;
1745 conflict_aclass = ALLOCNO_CLASS (conflict_a);
1746 ira_assert (ira_reg_classes_intersect_p
1747 [aclass][conflict_aclass]);
1748 if (ALLOCNO_ASSIGNED_P (conflict_a))
1750 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1751 if (hard_regno >= 0
1752 && (ira_hard_reg_set_intersection_p
1753 (hard_regno, ALLOCNO_MODE (conflict_a),
1754 reg_class_contents[aclass])))
1756 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
1757 int conflict_nregs;
1759 mode = ALLOCNO_MODE (conflict_a);
1760 conflict_nregs = hard_regno_nregs[hard_regno][mode];
1761 if (conflict_nregs == n_objects && conflict_nregs > 1)
1763 int num = OBJECT_SUBWORD (conflict_obj);
1765 if (REG_WORDS_BIG_ENDIAN)
1766 SET_HARD_REG_BIT (conflicting_regs[word],
1767 hard_regno + n_objects - num - 1);
1768 else
1769 SET_HARD_REG_BIT (conflicting_regs[word],
1770 hard_regno + num);
1772 else
1773 IOR_HARD_REG_SET
1774 (conflicting_regs[word],
1775 ira_reg_mode_hard_regset[hard_regno][mode]);
1776 if (hard_reg_set_subset_p (profitable_hard_regs,
1777 conflicting_regs[word]))
1778 goto fail;
1781 else if (! retry_p
1782 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1783 /* Don't process the conflict allocno twice. */
1784 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1785 != curr_allocno_process))
1787 int k, *conflict_costs;
1789 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1790 = curr_allocno_process;
1791 ira_allocate_and_copy_costs
1792 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1793 conflict_aclass,
1794 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1795 conflict_costs
1796 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1797 if (conflict_costs != NULL)
1798 for (j = class_size - 1; j >= 0; j--)
1800 hard_regno = ira_class_hard_regs[aclass][j];
1801 ira_assert (hard_regno >= 0);
1802 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
1803 if (k < 0
1804 /* If HARD_REGNO is not available for CONFLICT_A,
1805 the conflict would be ignored, since HARD_REGNO
1806 will never be assigned to CONFLICT_A. */
1807 || !TEST_HARD_REG_BIT (data->profitable_hard_regs,
1808 hard_regno))
1809 continue;
1810 full_costs[j] -= conflict_costs[k];
1812 queue_update_cost (conflict_a, NULL, COST_HOP_DIVISOR);
1817 if (! retry_p)
1818 /* Take into account preferences of allocnos connected by copies to
1819 the conflict allocnos. */
1820 update_conflict_hard_regno_costs (full_costs, aclass, true);
1822 /* Take preferences of allocnos connected by copies into
1823 account. */
1824 if (! retry_p)
1826 start_update_cost ();
1827 queue_update_cost (a, NULL, COST_HOP_DIVISOR);
1828 update_conflict_hard_regno_costs (full_costs, aclass, false);
1830 min_cost = min_full_cost = INT_MAX;
1831 /* We don't care about giving callee saved registers to allocnos no
1832 living through calls because call clobbered registers are
1833 allocated first (it is usual practice to put them first in
1834 REG_ALLOC_ORDER). */
1835 mode = ALLOCNO_MODE (a);
1836 for (i = 0; i < class_size; i++)
1838 hard_regno = ira_class_hard_regs[aclass][i];
1839 #ifdef STACK_REGS
1840 if (no_stack_reg_p
1841 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1842 continue;
1843 #endif
1844 if (! check_hard_reg_p (a, hard_regno,
1845 conflicting_regs, profitable_hard_regs))
1846 continue;
1847 cost = costs[i];
1848 full_cost = full_costs[i];
1849 if (!HONOR_REG_ALLOC_ORDER)
1851 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1852 /* We need to save/restore the hard register in
1853 epilogue/prologue. Therefore we increase the cost. */
1855 rclass = REGNO_REG_CLASS (hard_regno);
1856 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1857 + ira_memory_move_cost[mode][rclass][1])
1858 * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
1859 cost += add_cost;
1860 full_cost += add_cost;
1863 if (min_cost > cost)
1864 min_cost = cost;
1865 if (min_full_cost > full_cost)
1867 min_full_cost = full_cost;
1868 best_hard_regno = hard_regno;
1869 ira_assert (hard_regno >= 0);
1872 if (min_full_cost > mem_cost)
1874 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1875 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1876 mem_cost, min_full_cost);
1877 best_hard_regno = -1;
1879 fail:
1880 if (best_hard_regno >= 0)
1882 for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
1883 allocated_hardreg_p[best_hard_regno + i] = true;
1885 if (! retry_p)
1886 restore_costs_from_copies (a);
1887 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1888 ALLOCNO_ASSIGNED_P (a) = true;
1889 if (best_hard_regno >= 0)
1890 update_costs_from_copies (a, true, ! retry_p);
1891 ira_assert (ALLOCNO_CLASS (a) == aclass);
1892 /* We don't need updated costs anymore. */
1893 ira_free_allocno_updated_costs (a);
1894 return best_hard_regno >= 0;
/* An array used to sort copies by execution frequency (see
   copy_freq_compare_func).  */
static ira_copy_t *sorted_copies;
1902 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1903 used to find a conflict for new allocnos or allocnos with the
1904 different allocno classes. */
1905 static bool
1906 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
1908 rtx reg1, reg2;
1909 int i, j;
1910 int n1 = ALLOCNO_NUM_OBJECTS (a1);
1911 int n2 = ALLOCNO_NUM_OBJECTS (a2);
1913 if (a1 == a2)
1914 return false;
1915 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
1916 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
1917 if (reg1 != NULL && reg2 != NULL
1918 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
1919 return false;
1921 for (i = 0; i < n1; i++)
1923 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
1925 for (j = 0; j < n2; j++)
1927 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
1929 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
1930 OBJECT_LIVE_RANGES (c2)))
1931 return true;
1934 return false;
1937 /* The function is used to sort copies according to their execution
1938 frequencies. */
1939 static int
1940 copy_freq_compare_func (const void *v1p, const void *v2p)
1942 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1943 int pri1, pri2;
1945 pri1 = cp1->freq;
1946 pri2 = cp2->freq;
1947 if (pri2 - pri1)
1948 return pri2 - pri1;
1950 /* If frequencies are equal, sort by copies, so that the results of
1951 qsort leave nothing to chance. */
1952 return cp1->num - cp2->num;
/* Return true if any allocno from thread of A1 conflicts with any
   allocno from thread A2.  Threads are circular lists linked by
   next_thread_allocno; A1 and A2 themselves serve as loop sentinels,
   and starting from their successors makes each header the last
   element visited.  */
static bool
allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  for (a = ALLOCNO_COLOR_DATA (a2)->next_thread_allocno;;
       a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
    {
      for (conflict_a = ALLOCNO_COLOR_DATA (a1)->next_thread_allocno;;
	   conflict_a = ALLOCNO_COLOR_DATA (conflict_a)->next_thread_allocno)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
/* Merge two threads given correspondingly by their first allocnos T1
   and T2 (more accurately merging T2 into T1).  T2's circular list is
   spliced right after T1, every member of T2 is relabeled to belong
   to thread T1, and the thread frequencies are summed.  */
static void
merge_threads (ira_allocno_t t1, ira_allocno_t t2)
{
  ira_allocno_t a, next, last;

  gcc_assert (t1 != t2
	      && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
	      && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
  /* Walk T2's circular list, retagging every member with T1 as its
     thread header; LAST ends up as the element just before T2.  */
  for (last = t2, a = ALLOCNO_COLOR_DATA (t2)->next_thread_allocno;;
       a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
    {
      ALLOCNO_COLOR_DATA (a)->first_thread_allocno = t1;
      if (a == t2)
	break;
      last = a;
    }
  /* Splice T2's list between T1 and T1's former successor.  */
  next = ALLOCNO_COLOR_DATA (t1)->next_thread_allocno;
  ALLOCNO_COLOR_DATA (t1)->next_thread_allocno = t2;
  ALLOCNO_COLOR_DATA (last)->next_thread_allocno = next;
  ALLOCNO_COLOR_DATA (t1)->thread_freq += ALLOCNO_COLOR_DATA (t2)->thread_freq;
}
/* Create threads by processing CP_NUM copies from sorted copies.  We
   process the most expensive copies first.  */
static void
form_threads_from_copies (int cp_num)
{
  ira_allocno_t a, thread1, thread2;
  ira_copy_t cp;
  int i, n;

  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Form threads processing copies, most frequently executed
     first.  */
  for (; cp_num != 0;)
    {
      /* Find the hottest copy joining two distinct, non-conflicting
	 threads and merge them; after a merge we must restart because
	 thread membership has changed.  */
      for (i = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  thread1 = ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno;
	  thread2 = ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno;
	  if (thread1 == thread2)
	    continue;
	  if (! allocno_thread_conflict_p (thread1, thread2))
	    {
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		fprintf
		  (ira_dump_file,
		   " Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
		   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
		   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
		   cp->freq);
	      merge_threads (thread1, thread2);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  thread1 = ALLOCNO_COLOR_DATA (thread1)->first_thread_allocno;
		  fprintf (ira_dump_file, " Result (freq=%d): a%dr%d(%d)",
			   ALLOCNO_COLOR_DATA (thread1)->thread_freq,
			   ALLOCNO_NUM (thread1), ALLOCNO_REGNO (thread1),
			   ALLOCNO_FREQ (thread1));
		  for (a = ALLOCNO_COLOR_DATA (thread1)->next_thread_allocno;
		       a != thread1;
		       a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
		    fprintf (ira_dump_file, " a%dr%d(%d)",
			     ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
			     ALLOCNO_FREQ (a));
		  fprintf (ira_dump_file, "\n");
		}
	      /* Skip the merged copy and stop scanning: the remaining
		 copies are re-filtered below.  */
	      i++;
	      break;
	    }
	}
      /* Collect the rest of copies.  Copies whose endpoints now share
	 a thread are dropped.  */
      for (n = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno
	      != ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno)
	    sorted_copies[n++] = cp;
	}
      cp_num = n;
    }
}
2067 /* Create threads by processing copies of all alocnos from BUCKET. We
2068 process the most expensive copies first. */
2069 static void
2070 form_threads_from_bucket (ira_allocno_t bucket)
2072 ira_allocno_t a;
2073 ira_copy_t cp, next_cp;
2074 int cp_num = 0;
2076 for (a = bucket; a != NULL; a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2078 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2080 if (cp->first == a)
2082 next_cp = cp->next_first_allocno_copy;
2083 sorted_copies[cp_num++] = cp;
2085 else if (cp->second == a)
2086 next_cp = cp->next_second_allocno_copy;
2087 else
2088 gcc_unreachable ();
2091 form_threads_from_copies (cp_num);
2094 /* Create threads by processing copies of colorable allocno A. We
2095 process most expensive copies first. */
2096 static void
2097 form_threads_from_colorable_allocno (ira_allocno_t a)
2099 ira_allocno_t another_a;
2100 ira_copy_t cp, next_cp;
2101 int cp_num = 0;
2103 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2105 if (cp->first == a)
2107 next_cp = cp->next_first_allocno_copy;
2108 another_a = cp->second;
2110 else if (cp->second == a)
2112 next_cp = cp->next_second_allocno_copy;
2113 another_a = cp->first;
2115 else
2116 gcc_unreachable ();
2117 if ((! ALLOCNO_COLOR_DATA (another_a)->in_graph_p
2118 && !ALLOCNO_COLOR_DATA (another_a)->may_be_spilled_p)
2119 || ALLOCNO_COLOR_DATA (another_a)->colorable_p)
2120 sorted_copies[cp_num++] = cp;
2122 form_threads_from_copies (cp_num);
2125 /* Form initial threads which contain only one allocno. */
2126 static void
2127 init_allocno_threads (void)
2129 ira_allocno_t a;
2130 unsigned int j;
2131 bitmap_iterator bi;
2133 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2135 a = ira_allocnos[j];
2136 /* Set up initial thread data: */
2137 ALLOCNO_COLOR_DATA (a)->first_thread_allocno
2138 = ALLOCNO_COLOR_DATA (a)->next_thread_allocno = a;
2139 ALLOCNO_COLOR_DATA (a)->thread_freq = ALLOCNO_FREQ (a);
/* This page contains the allocator based on the Chaitin-Briggs algorithm.  */

/* Bucket of allocnos that can be colored currently without
   spilling.  */
static ira_allocno_t colorable_allocno_bucket;

/* Bucket of allocnos that might be not colored currently without
   spilling.  */
static ira_allocno_t uncolorable_allocno_bucket;

/* The current number of allocnos in the uncolorable_bucket.  */
static int uncolorable_allocnos_num;
2157 /* Return the current spill priority of allocno A. The less the
2158 number, the more preferable the allocno for spilling. */
2159 static inline int
2160 allocno_spill_priority (ira_allocno_t a)
2162 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
2164 return (data->temp
2165 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2166 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
2167 + 1));
2170 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
2171 before the call. */
2172 static void
2173 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
2175 ira_allocno_t first_a;
2176 allocno_color_data_t data;
2178 if (bucket_ptr == &uncolorable_allocno_bucket
2179 && ALLOCNO_CLASS (a) != NO_REGS)
2181 uncolorable_allocnos_num++;
2182 ira_assert (uncolorable_allocnos_num > 0);
2184 first_a = *bucket_ptr;
2185 data = ALLOCNO_COLOR_DATA (a);
2186 data->next_bucket_allocno = first_a;
2187 data->prev_bucket_allocno = NULL;
2188 if (first_a != NULL)
2189 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
2190 *bucket_ptr = a;
2193 /* Compare two allocnos to define which allocno should be pushed first
2194 into the coloring stack. If the return is a negative number, the
2195 allocno given by the first parameter will be pushed first. In this
2196 case such allocno has less priority than the second one and the
2197 hard register will be assigned to it after assignment to the second
2198 one. As the result of such assignment order, the second allocno
2199 has a better chance to get the best hard register. */
2200 static int
2201 bucket_allocno_compare_func (const void *v1p, const void *v2p)
2203 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2204 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2205 int diff, freq1, freq2, a1_num, a2_num;
2206 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)->first_thread_allocno;
2207 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)->first_thread_allocno;
2208 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
2210 freq1 = ALLOCNO_COLOR_DATA (t1)->thread_freq;
2211 freq2 = ALLOCNO_COLOR_DATA (t2)->thread_freq;
2212 if ((diff = freq1 - freq2) != 0)
2213 return diff;
2215 if ((diff = ALLOCNO_NUM (t2) - ALLOCNO_NUM (t1)) != 0)
2216 return diff;
2218 /* Push pseudos requiring less hard registers first. It means that
2219 we will assign pseudos requiring more hard registers first
2220 avoiding creation small holes in free hard register file into
2221 which the pseudos requiring more hard registers can not fit. */
2222 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
2223 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
2224 return diff;
2226 freq1 = ALLOCNO_FREQ (a1);
2227 freq2 = ALLOCNO_FREQ (a2);
2228 if ((diff = freq1 - freq2) != 0)
2229 return diff;
2231 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
2232 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
2233 if ((diff = a2_num - a1_num) != 0)
2234 return diff;
2235 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2238 /* Sort bucket *BUCKET_PTR and return the result through
2239 BUCKET_PTR. */
2240 static void
2241 sort_bucket (ira_allocno_t *bucket_ptr,
2242 int (*compare_func) (const void *, const void *))
2244 ira_allocno_t a, head;
2245 int n;
2247 for (n = 0, a = *bucket_ptr;
2248 a != NULL;
2249 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2250 sorted_allocnos[n++] = a;
2251 if (n <= 1)
2252 return;
2253 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
2254 head = NULL;
2255 for (n--; n >= 0; n--)
2257 a = sorted_allocnos[n];
2258 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
2259 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
2260 if (head != NULL)
2261 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
2262 head = a;
2264 *bucket_ptr = head;
2267 /* Add ALLOCNO to colorable bucket maintaining the order according
2268 their priority. ALLOCNO should be not in a bucket before the
2269 call. */
2270 static void
2271 add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
2273 ira_allocno_t before, after;
2275 form_threads_from_colorable_allocno (allocno);
2276 for (before = colorable_allocno_bucket, after = NULL;
2277 before != NULL;
2278 after = before,
2279 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
2280 if (bucket_allocno_compare_func (&allocno, &before) < 0)
2281 break;
2282 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
2283 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
2284 if (after == NULL)
2285 colorable_allocno_bucket = allocno;
2286 else
2287 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
2288 if (before != NULL)
2289 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
2292 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
2293 the call. */
2294 static void
2295 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2297 ira_allocno_t prev_allocno, next_allocno;
2299 if (bucket_ptr == &uncolorable_allocno_bucket
2300 && ALLOCNO_CLASS (allocno) != NO_REGS)
2302 uncolorable_allocnos_num--;
2303 ira_assert (uncolorable_allocnos_num >= 0);
2305 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
2306 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
2307 if (prev_allocno != NULL)
2308 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
2309 else
2311 ira_assert (*bucket_ptr == allocno);
2312 *bucket_ptr = next_allocno;
2314 if (next_allocno != NULL)
2315 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
/* Put allocno A onto the coloring stack without removing it from its
   bucket.  Pushing allocno to the coloring stack can result in moving
   conflicting allocnos from the uncolorable bucket to the colorable
   one.  */
static void
push_allocno_to_stack (ira_allocno_t a)
{
  enum reg_class aclass;
  allocno_color_data_t data, conflict_data;
  int size, i, n = ALLOCNO_NUM_OBJECTS (a);

  data = ALLOCNO_COLOR_DATA (a);
  data->in_graph_p = false;
  allocno_stack_vec.safe_push (a);
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
  if (n > 1)
    {
      /* We will deal with the subwords individually.  */
      gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
      size = 1;
    }
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	  conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
	  /* Skip conflicting allocnos that are already colorable,
	     already removed from the graph, already assigned, or
	     whose profitable registers do not overlap with A's.  */
	  if (conflict_data->colorable_p
	      || ! conflict_data->in_graph_p
	      || ALLOCNO_ASSIGNED_P (conflict_a)
	      || !(hard_reg_set_intersect_p
		   (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
		    conflict_data->profitable_hard_regs)))
	    continue;
	  ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
				    ALLOCNO_NUM (conflict_a)));
	  /* Removing A may have made the conflicting allocno
	     colorable; if so, move it to the colorable bucket.  */
	  if (update_left_conflict_sizes_p (conflict_a, a, size))
	    {
	      delete_allocno_from_bucket
		(conflict_a, &uncolorable_allocno_bucket);
	      add_allocno_to_ordered_colorable_bucket (conflict_a);
	      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, " Making");
		  ira_print_expanded_allocno (conflict_a);
		  fprintf (ira_dump_file, " colorable\n");
		}
	    }
	}
    }
}
2379 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2380 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
2381 static void
2382 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2384 if (colorable_p)
2385 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2386 else
2387 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2388 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2390 fprintf (ira_dump_file, " Pushing");
2391 ira_print_expanded_allocno (allocno);
2392 if (colorable_p)
2393 fprintf (ira_dump_file, "(cost %d)\n",
2394 ALLOCNO_COLOR_DATA (allocno)->temp);
2395 else
2396 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2397 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
2398 allocno_spill_priority (allocno),
2399 ALLOCNO_COLOR_DATA (allocno)->temp);
2401 if (! colorable_p)
2402 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
2403 push_allocno_to_stack (allocno);
2406 /* Put all allocnos from colorable bucket onto the coloring stack. */
2407 static void
2408 push_only_colorable (void)
2410 form_threads_from_bucket (colorable_allocno_bucket);
2411 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
2412 for (;colorable_allocno_bucket != NULL;)
2413 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
/* Return the frequency of exit edges (if EXIT_P) or entry from/to the
   loop given by its LOOP_NODE.  If REGNO is non-negative, only edges
   on which pseudo REGNO is live (live-out of the source block and
   live-in of the destination block) are counted.  */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
  int freq, i;
  edge_iterator ei;
  edge e;
  vec<edge> edges;

  ira_assert (current_loops != NULL && loop_node->loop != NULL
	      && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
  freq = 0;
  if (! exit_p)
    {
      /* Entry edges: predecessors of the loop header, excluding the
	 latch (the back edge from inside the loop).  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch
	    && (regno < 0
		|| (bitmap_bit_p (df_get_live_out (e->src), regno)
		    && bitmap_bit_p (df_get_live_in (e->dest), regno))))
	  freq += EDGE_FREQUENCY (e);
    }
  else
    {
      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edges, i, e)
	if (regno < 0
	    || (bitmap_bit_p (df_get_live_out (e->src), regno)
		&& bitmap_bit_p (df_get_live_in (e->dest), regno)))
	  freq += EDGE_FREQUENCY (e);
      edges.release ();
    }

  return REG_FREQ_FROM_EDGE_FREQ (freq);
}
/* Calculate and return the cost of putting allocno A into memory.  */
static int
calculate_allocno_spill_cost (ira_allocno_t a)
{
  int regno, cost;
  machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t parent_allocno;
  ira_loop_tree_node_t parent_node, loop_node;

  regno = ALLOCNO_REGNO (a);
  /* Base cost: the memory cost minus the cost of keeping the allocno
     in a register of its class.  */
  cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
  /* Without a parent allocno for the same regno in the enclosing loop
     there are no border adjustments to make.  */
  if (ALLOCNO_CAP (a) != NULL)
    return cost;
  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
  if ((parent_node = loop_node->parent) == NULL)
    return cost;
  if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
    return cost;
  mode = ALLOCNO_MODE (a);
  rclass = ALLOCNO_CLASS (a);
  if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
    /* The parent allocno lives in memory: spilling A removes the
       memory moves that would otherwise occur on the loop borders.  */
    cost -= (ira_memory_move_cost[mode][rclass][0]
	     * ira_loop_edge_freq (loop_node, regno, true)
	     + ira_memory_move_cost[mode][rclass][1]
	     * ira_loop_edge_freq (loop_node, regno, false));
  else
    {
      /* The parent allocno got a hard register: spilling A adds
	 memory moves on the loop borders, minus the register-register
	 moves that connecting the two allocnos would have cost.  */
      ira_init_register_move_cost_if_necessary (mode);
      cost += ((ira_memory_move_cost[mode][rclass][1]
		* ira_loop_edge_freq (loop_node, regno, true)
		+ ira_memory_move_cost[mode][rclass][0]
		* ira_loop_edge_freq (loop_node, regno, false))
	       - (ira_register_move_cost[mode][rclass][rclass]
		  * (ira_loop_edge_freq (loop_node, regno, false)
		     + ira_loop_edge_freq (loop_node, regno, true))));
    }
  return cost;
}
2492 /* Used for sorting allocnos for spilling. */
2493 static inline int
2494 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2496 int pri1, pri2, diff;
2498 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2499 return 1;
2500 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2501 return -1;
2502 pri1 = allocno_spill_priority (a1);
2503 pri2 = allocno_spill_priority (a2);
2504 if ((diff = pri1 - pri2) != 0)
2505 return diff;
2506 if ((diff
2507 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2508 return diff;
2509 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2512 /* Used for sorting allocnos for spilling. */
2513 static int
2514 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2516 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2517 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2519 return allocno_spill_priority_compare (p1, p2);
2522 /* Push allocnos to the coloring stack. The order of allocnos in the
2523 stack defines the order for the subsequent coloring. */
2524 static void
2525 push_allocnos_to_stack (void)
2527 ira_allocno_t a;
2528 int cost;
2530 /* Calculate uncolorable allocno spill costs. */
2531 for (a = uncolorable_allocno_bucket;
2532 a != NULL;
2533 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2534 if (ALLOCNO_CLASS (a) != NO_REGS)
2536 cost = calculate_allocno_spill_cost (a);
2537 /* ??? Remove cost of copies between the coalesced
2538 allocnos. */
2539 ALLOCNO_COLOR_DATA (a)->temp = cost;
2541 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
2542 for (;;)
2544 push_only_colorable ();
2545 a = uncolorable_allocno_bucket;
2546 if (a == NULL)
2547 break;
2548 remove_allocno_from_bucket_and_push (a, false);
2550 ira_assert (colorable_allocno_bucket == NULL
2551 && uncolorable_allocno_bucket == NULL);
2552 ira_assert (uncolorable_allocnos_num == 0);
/* Pop the coloring stack and assign hard registers to the popped
   allocnos.  */
static void
pop_allocnos_from_stack (void)
{
  ira_allocno_t allocno;
  enum reg_class aclass;

  for (;allocno_stack_vec.length () != 0;)
    {
      allocno = allocno_stack_vec.pop ();
      aclass = ALLOCNO_CLASS (allocno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, " Popping");
	  ira_print_expanded_allocno (allocno);
	  fprintf (ira_dump_file, " -- ");
	}
      if (aclass == NO_REGS)
	{
	  /* No usable register class: the allocno goes to memory.  */
	  ALLOCNO_HARD_REGNO (allocno) = -1;
	  ALLOCNO_ASSIGNED_P (allocno) = true;
	  ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
	  ira_assert
	    (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign memory\n");
	}
      else if (assign_hard_reg (allocno, false))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign reg %d\n",
		     ALLOCNO_HARD_REGNO (allocno));
	}
      else if (ALLOCNO_ASSIGNED_P (allocno))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "spill%s\n",
		     ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p
		     ? "" : "!");
	}
      /* Mark the allocno as back in the conflict graph.  */
      ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
    }
}
/* Set up number of available hard registers for allocno A.  */
static void
setup_allocno_available_regs_num (ira_allocno_t a)
{
  int i, n, hard_regno, hard_regs_num, nwords;
  enum reg_class aclass;
  allocno_color_data_t data;

  aclass = ALLOCNO_CLASS (a);
  data = ALLOCNO_COLOR_DATA (a);
  data->available_regs_num = 0;
  if (aclass == NO_REGS)
    return;
  hard_regs_num = ira_class_hard_regs_num[aclass];
  nwords = ALLOCNO_NUM_OBJECTS (a);
  /* Count the class's hard registers that are in A's profitable
     set.  */
  for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
    {
      hard_regno = ira_class_hard_regs[aclass][i];
      /* Checking only profitable hard regs.  */
      if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
	n++;
    }
  data->available_regs_num = n;
  /* The rest of the function is dump output only.  */
  if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
    return;
  fprintf
    (ira_dump_file,
     " Allocno a%dr%d of %s(%d) has %d avail. regs ",
     ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
     reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
  print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
  fprintf (ira_dump_file, ", %snode: ",
	   hard_reg_set_equal_p (data->profitable_hard_regs,
				 data->hard_regs_node->hard_regs->set)
	   ? "" : "^");
  print_hard_reg_set (ira_dump_file,
		      data->hard_regs_node->hard_regs->set, false);
  for (i = 0; i < nwords; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);

      if (nwords != 1)
	{
	  if (i != 0)
	    fprintf (ira_dump_file, ", ");
	  fprintf (ira_dump_file, " obj %d", i);
	}
      fprintf (ira_dump_file, " (confl regs = ");
      print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  false);
      fprintf (ira_dump_file, ")");
    }
  fprintf (ira_dump_file, "\n");
}
2655 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2656 conflicting allocnos and hard registers. */
2657 static void
2658 put_allocno_into_bucket (ira_allocno_t allocno)
2660 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2661 setup_allocno_available_regs_num (allocno);
2662 if (setup_left_conflict_sizes_p (allocno))
2663 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2664 else
2665 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
/* Map: allocno number -> allocno priority.  Filled by
   setup_allocno_priorities.  */
static int *allocno_priorities;
/* Set up priorities for N allocnos in array
   CONSIDERATION_ALLOCNOS.  */
static void
setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
{
  int i, length, nrefs, priority, max_priority, mult;
  ira_allocno_t a;

  max_priority = 0;
  for (i = 0; i < n; i++)
    {
      a = consideration_allocnos[i];
      nrefs = ALLOCNO_NREFS (a);
      ira_assert (nrefs >= 0);
      mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
      ira_assert (mult >= 0);
      /* Priority: log2-scaled reference count times the benefit of
	 having a register over memory, times the number of hard
	 registers the allocno needs.  */
      allocno_priorities[ALLOCNO_NUM (a)]
	= priority
	= (mult
	   * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
	   * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
      if (priority < 0)
	priority = -priority;
      if (max_priority < priority)
	max_priority = priority;
    }
  /* Rescale so the largest priority uses the full int range, then
     divide by the per-object live-range length (in excess pressure
     points).  */
  mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
  for (i = 0; i < n; i++)
    {
      a = consideration_allocnos[i];
      length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      if (ALLOCNO_NUM_OBJECTS (a) > 1)
	length /= ALLOCNO_NUM_OBJECTS (a);
      if (length <= 0)
	length = 1;
      allocno_priorities[ALLOCNO_NUM (a)]
	= allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
    }
}
2711 /* Sort allocnos according to the profit of usage of a hard register
2712 instead of memory for them. */
2713 static int
2714 allocno_cost_compare_func (const void *v1p, const void *v2p)
2716 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2717 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2718 int c1, c2;
2720 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2721 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2722 if (c1 - c2)
2723 return c1 - c2;
2725 /* If regs are equally good, sort by allocno numbers, so that the
2726 results of qsort leave nothing to chance. */
2727 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
/* We used Chaitin-Briggs coloring to assign as many pseudos as
   possible to hard registers.  Let us try to improve allocation with
   cost point of view.  This function improves the allocation by
   spilling some allocnos and assigning the freed hard registers to
   other allocnos if it decreases the overall allocation cost.  */
static void
improve_allocation (void)
{
  unsigned int i;
  int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
  int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
  bool try_p;
  enum reg_class aclass;
  machine_mode mode;
  int *allocno_costs;
  int costs[FIRST_PSEUDO_REGISTER];
  HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
  ira_allocno_t a;
  bitmap_iterator bi;

  /* Clear counts used to process conflicting allocnos only once for
     each allocno.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
  check = n = 0;
  /* Process each allocno and try to assign a hard register to it by
     spilling some its conflicting allocnos.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      a = ira_allocnos[i];
      ALLOCNO_COLOR_DATA (a)->temp = 0;
      if (empty_profitable_hard_regs (a))
	continue;
      /* CHECK is bumped per processed allocno; comparing temp against
	 it below detects conflicts already seen in this iteration.  */
      check++;
      aclass = ALLOCNO_CLASS (a);
      allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
      if (allocno_costs == NULL)
	allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
      if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
	base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
      else if (allocno_costs == NULL)
	/* It means that assigning a hard register is not profitable
	   (we don't waste memory for hard register costs in this
	   case).  */
	continue;
      else
	base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
      try_p = false;
      get_conflict_and_start_profitable_regs (a, false,
					      conflicting_regs,
					      &profitable_hard_regs);
      class_size = ira_class_hard_regs_num[aclass];
      /* Set up cost improvement for usage of each profitable hard
	 register for allocno A.  */
      for (j = 0; j < class_size; j++)
	{
	  hregno = ira_class_hard_regs[aclass][j];
	  if (! check_hard_reg_p (a, hregno,
				  conflicting_regs, profitable_hard_regs))
	    continue;
	  ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
	  k = allocno_costs == NULL ? 0 : j;
	  costs[hregno] = (allocno_costs == NULL
			   ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
	  costs[hregno] -= base_cost;
	  /* Negative cost means the register would be cheaper than
	     the current assignment.  */
	  if (costs[hregno] < 0)
	    try_p = true;
	}
      if (! try_p)
	/* There is no chance to improve the allocation cost by
	   assigning hard register to allocno A even without spilling
	   conflicting allocnos.  */
	continue;
      mode = ALLOCNO_MODE (a);
      nwords = ALLOCNO_NUM_OBJECTS (a);
      /* Process each allocno conflicting with A and update the cost
	 improvement for profitable hard registers of A.  To use a
	 hard register for A we need to spill some conflicting
	 allocnos and that creates penalty for the cost
	 improvement.  */
      for (word = 0; word < nwords; word++)
	{
	  ira_object_t conflict_obj;
	  ira_object_t obj = ALLOCNO_OBJECT (a, word);
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
		/* We already processed this conflicting allocno
		   because we processed earlier another object of the
		   conflicting allocno.  */
		continue;
	      ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
	      if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
		continue;
	      /* Spill penalty: the conflicting allocno's memory cost
		 minus the cost of its current hard register.  */
	      spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
	      k = (ira_class_hard_reg_index
		   [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
	      ira_assert (k >= 0);
	      if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
		  != NULL)
		spill_cost -= allocno_costs[k];
	      else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
		       != NULL)
		spill_cost -= allocno_costs[k];
	      else
		spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
	      conflict_nregs
		= hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
	      /* Charge the penalty to every candidate register of A
		 whose multi-register footprint would overlap the
		 conflicting allocno's registers.  */
	      for (r = conflict_hregno;
		   r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
		   r--)
		if (check_hard_reg_p (a, r,
				      conflicting_regs, profitable_hard_regs))
		  costs[r] += spill_cost;
	      for (r = conflict_hregno + 1;
		   r < conflict_hregno + conflict_nregs;
		   r++)
		if (check_hard_reg_p (a, r,
				      conflicting_regs, profitable_hard_regs))
		  costs[r] += spill_cost;
	    }
	}
      min_cost = INT_MAX;
      best = -1;
      /* Now we choose hard register for A which results in highest
	 allocation cost improvement.  */
      for (j = 0; j < class_size; j++)
	{
	  hregno = ira_class_hard_regs[aclass][j];
	  if (check_hard_reg_p (a, hregno,
				conflicting_regs, profitable_hard_regs)
	      && min_cost > costs[hregno])
	    {
	      best = hregno;
	      min_cost = costs[hregno];
	    }
	}
      if (min_cost >= 0)
	/* We are in a situation when assigning any hard register to A
	   by spilling some conflicting allocnos does not improve the
	   allocation cost.  */
	continue;
      nregs = hard_regno_nregs[best][mode];
      /* Now spill conflicting allocnos which contain a hard register
	 of A when we assign the best chosen hard register to it.  */
      for (word = 0; word < nwords; word++)
	{
	  ira_object_t conflict_obj;
	  ira_object_t obj = ALLOCNO_OBJECT (a, word);
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
		continue;
	      conflict_nregs
		= hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
	      if (best + nregs <= conflict_hregno
		  || conflict_hregno + conflict_nregs <= best)
		/* No intersection.  */
		continue;
	      ALLOCNO_HARD_REGNO (conflict_a) = -1;
	      sorted_allocnos[n++] = conflict_a;
	      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
		fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
			 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
			 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	    }
	}
      /* Assign the best chosen hard register to A.  */
      ALLOCNO_HARD_REGNO (a) = best;
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
		 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
    }
  if (n == 0)
    return;
  /* We spilled some allocnos to assign their hard registers to other
     allocnos.  The spilled allocnos are now in array
     'sorted_allocnos'.  There is still a possibility that some of the
     spilled allocnos can get hard registers.  So let us try assign
     them hard registers again (just a reminder -- function
     'assign_hard_reg' assigns hard registers only if it is possible
     and profitable).  We process the spilled allocnos with biggest
     benefit to get hard register first -- see function
     'allocno_cost_compare_func'.  */
  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
	 allocno_cost_compare_func);
  for (j = 0; j < n; j++)
    {
      a = sorted_allocnos[j];
      ALLOCNO_ASSIGNED_P (a) = false;
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, " ");
	  ira_print_expanded_allocno (a);
	  fprintf (ira_dump_file, " -- ");
	}
      if (assign_hard_reg (a, false))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign hard reg %d\n",
		     ALLOCNO_HARD_REGNO (a));
	}
      else
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, "assign memory\n");
	}
    }
}
2948 /* Sort allocnos according to their priorities. */
2949 static int
2950 allocno_priority_compare_func (const void *v1p, const void *v2p)
2952 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2953 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2954 int pri1, pri2;
2956 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2957 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2958 if (pri2 != pri1)
2959 return SORTGT (pri2, pri1);
2961 /* If regs are equally good, sort by allocnos, so that the results of
2962 qsort leave nothing to chance. */
2963 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
   taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP.
   Depending on FLAG_IRA_ALGORITHM this either runs the simple
   priority-based assignment or the full graph-coloring scheme
   (build forest, push/pop coloring stack).  */
static void
color_allocnos (void)
{
  unsigned int i, n;
  bitmap_iterator bi;
  ira_allocno_t a;

  setup_profitable_hard_regs ();
  /* First pass: collect each allocno's conflicting hard registers and
     drop preferences for hard regs that are not profitable for it.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int l, nr;
      HARD_REG_SET conflict_hard_regs;
      allocno_color_data_t data;
      ira_pref_t pref, next_pref;

      a = ira_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      for (l = 0; l < nr; l++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, l);
	  IOR_HARD_REG_SET (conflict_hard_regs,
			    OBJECT_CONFLICT_HARD_REGS (obj));
	}
      data = ALLOCNO_COLOR_DATA (a);
      for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
	{
	  /* Save the link before ira_remove_pref may free PREF.  */
	  next_pref = pref->next_pref;
	  if (! ira_hard_reg_in_set_p (pref->hard_regno,
				       ALLOCNO_MODE (a),
				       data->profitable_hard_regs))
	    ira_remove_pref (pref);
	}
    }
  if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
    {
      /* Priority algorithm: spill NO_REGS allocnos immediately, sort
	 the rest by priority and greedily assign hard registers.  */
      n = 0;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) == NO_REGS)
	    {
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, "      Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	      continue;
	    }
	  sorted_allocnos[n++] = a;
	}
      if (n != 0)
	{
	  setup_allocno_priorities (sorted_allocnos, n);
	  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
		 allocno_priority_compare_func);
	  for (i = 0; i < n; i++)
	    {
	      a = sorted_allocnos[i];
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, "      ");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "  -- ");
		}
	      if (assign_hard_reg (a, false))
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign hard reg %d\n",
			     ALLOCNO_HARD_REGNO (a));
		}
	      else
		{
		  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		    fprintf (ira_dump_file, "assign memory\n");
		}
	    }
	}
    }
  else
    {
      /* Chaitin-Briggs algorithm: build the hard-reg-node forest,
	 keep only allocnos with profitable hard regs in the conflict
	 graph, then color via the push/pop stack discipline.  */
      form_allocno_hard_regs_nodes_forest ();
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
	print_hard_regs_forest (ira_dump_file);
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
	    {
	      ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
	      update_costs_from_prefs (a);
	    }
	  else
	    {
	      /* No profitable hard regs at all -- spill right away.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ALLOCNO_ASSIGNED_P (a) = true;
	      /* We don't need updated costs anymore.  */
	      ira_free_allocno_updated_costs (a);
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf (ira_dump_file, "      Spill");
		  ira_print_expanded_allocno (a);
		  fprintf (ira_dump_file, "\n");
		}
	    }
	}
      /* Put the allocnos into the corresponding buckets.  */
      colorable_allocno_bucket = NULL;
      uncolorable_allocno_bucket = NULL;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
	{
	  a = ira_allocnos[i];
	  if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
	    put_allocno_into_bucket (a);
	}
      push_allocnos_to_stack ();
      pop_allocnos_from_stack ();
      finish_allocno_hard_regs_nodes_forest ();
    }
  improve_allocation ();
}
/* Output information about the loop given by its LOOP_TREE_NODE to
   the IRA dump file: the loop header, member basic blocks with their
   exit edges, all/border allocnos, modified regnos, and register
   pressure per pressure class.  */
static void
print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
  unsigned int j;
  bitmap_iterator bi;
  ira_loop_tree_node_t subloop_node, dest_loop_node;
  edge e;
  edge_iterator ei;

  /* The root of the loop tree represents the whole function; it has
     no struct loop attached and is printed as "Loop 0".  */
  if (loop_tree_node->parent == NULL)
    fprintf (ira_dump_file,
	     "\n  Loop 0 (parent -1, header bb%d, depth 0)\n    bbs:",
	     NUM_FIXED_BLOCKS);
  else
    {
      ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
      fprintf (ira_dump_file,
	       "\n  Loop %d (parent %d, header bb%d, depth %d)\n    bbs:",
	       loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
	       loop_tree_node->loop->header->index,
	       loop_depth (loop_tree_node->loop));
    }
  /* List member blocks; annotate edges that leave this loop with the
     destination block and its loop number.  */
  for (subloop_node = loop_tree_node->children;
       subloop_node != NULL;
       subloop_node = subloop_node->next)
    if (subloop_node->bb != NULL)
      {
	fprintf (ira_dump_file, " %d", subloop_node->bb->index);
	FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
	  if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	      && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
		  != loop_tree_node))
	    fprintf (ira_dump_file, "(->%d:l%d)",
		     e->dest->index, dest_loop_node->loop_num);
      }
  fprintf (ira_dump_file, "\n    all:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n    modified regnos:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
    fprintf (ira_dump_file, " %d", j);
  fprintf (ira_dump_file, "\n    border:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n    Pressure:");
  for (j = 0; (int) j < ira_pressure_classes_num; j++)
    {
      enum reg_class pclass;

      pclass = ira_pressure_classes[j];
      /* Only classes with nonzero pressure are interesting.  */
      if (loop_tree_node->reg_pressure[pclass] == 0)
	continue;
      fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
	       loop_tree_node->reg_pressure[pclass]);
    }
  fprintf (ira_dump_file, "\n");
}
/* Color the allocnos inside loop (in the extreme case it can be all
   of the function) given the corresponding LOOP_TREE_NODE.  The
   function is called for each loop during top-down traverse of the
   loop tree.  After coloring this region it propagates assignments
   to caps and adjusts the hard-reg/memory costs of the corresponding
   allocnos in the subloops.  */
static void
color_pass (ira_loop_tree_node_t loop_tree_node)
{
  int regno, hard_regno, index = -1, n;
  int cost, exit_freq, enter_freq;
  unsigned int j;
  bitmap_iterator bi;
  machine_mode mode;
  enum reg_class rclass, aclass, pclass;
  ira_allocno_t a, subloop_allocno;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_tree_node->bb == NULL);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    print_loop_title (loop_tree_node);

  bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
  bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
  /* Count the allocnos under consideration and exclude already
     assigned ones from the set to be colored.  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      n++;
      if (! ALLOCNO_ASSIGNED_P (a))
	continue;
      bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
    }
  /* Attach zero-initialized per-allocno color data for this pass.  */
  allocno_color_data
    = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
					   * n);
  memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
  curr_allocno_process = 0;
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
      n++;
    }
  init_allocno_threads ();
  /* Color all mentioned allocnos including transparent ones.  */
  color_allocnos ();
  /* Process caps.  They are processed just once.  */
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
      {
	a = ira_allocnos[j];
	if (ALLOCNO_CAP_MEMBER (a) == NULL)
	  continue;
	/* Remove from processing in the next loop.  */
	bitmap_clear_bit (consideration_allocno_bitmap, j);
	rclass = ALLOCNO_CLASS (a);
	pclass = ira_pressure_class_translate[rclass];
	/* Propagate the cap's assignment down to the member allocno
	   only when register pressure in this region is low enough.  */
	if (flag_ira_region == IRA_REGION_MIXED
	    && (loop_tree_node->reg_pressure[pclass]
		<= ira_class_hard_regs_num[pclass]))
	  {
	    mode = ALLOCNO_MODE (a);
	    hard_regno = ALLOCNO_HARD_REGNO (a);
	    if (hard_regno >= 0)
	      {
		index = ira_class_hard_reg_index[rclass][hard_regno];
		ira_assert (index >= 0);
	      }
	    regno = ALLOCNO_REGNO (a);
	    subloop_allocno = ALLOCNO_CAP_MEMBER (a);
	    subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
	    ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
	    ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
	    ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
	    if (hard_regno >= 0)
	      update_costs_from_copies (subloop_allocno, true, true);
	    /* We don't need updated costs anymore.  */
	    ira_free_allocno_updated_costs (subloop_allocno);
	  }
      }
  /* Update costs of the corresponding allocnos (not caps) in the
     subloops.  */
  for (subloop_node = loop_tree_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
	{
	  a = ira_allocnos[j];
	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  pclass = ira_pressure_class_translate[rclass];
	  hard_regno = ALLOCNO_HARD_REGNO (a);
	  /* Use hard register class here.  ??? */
	  if (hard_regno >= 0)
	    {
	      index = ira_class_hard_reg_index[rclass][hard_regno];
	      ira_assert (index >= 0);
	    }
	  regno = ALLOCNO_REGNO (a);
	  /* ??? conflict costs */
	  subloop_allocno = subloop_node->regno_allocno_map[regno];
	  if (subloop_allocno == NULL
	      || ALLOCNO_CAP (subloop_allocno) != NULL)
	    continue;
	  ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
	  ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
				    ALLOCNO_NUM (subloop_allocno)));
	  /* Propagate the assignment directly when pressure is low,
	     for the PIC register, or for overlapping multi-regs.  */
	  if ((flag_ira_region == IRA_REGION_MIXED
	       && (loop_tree_node->reg_pressure[pclass]
		   <= ira_class_hard_regs_num[pclass]))
	      || (pic_offset_table_rtx != NULL
		  && regno == (int) REGNO (pic_offset_table_rtx))
	      /* Avoid overlapped multi-registers. Moves between them
		 might result in wrong code generation.  */
	      || (hard_regno >= 0
		  && ira_reg_class_max_nregs[pclass][mode] > 1))
	    {
	      if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
		{
		  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
		  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
		  if (hard_regno >= 0)
		    update_costs_from_copies (subloop_allocno, true, true);
		  /* We don't need updated costs anymore.  */
		  ira_free_allocno_updated_costs (subloop_allocno);
		}
	      continue;
	    }
	  exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	  enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	  ira_assert (regno < ira_reg_equiv_len);
	  if (ira_equiv_no_lvalue_p (regno))
	    {
	      /* Equivalence means no memory moves at the border --
		 just propagate the assignment.  */
	      if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
		{
		  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
		  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
		  if (hard_regno >= 0)
		    update_costs_from_copies (subloop_allocno, true, true);
		  /* We don't need updated costs anymore.  */
		  ira_free_allocno_updated_costs (subloop_allocno);
		}
	    }
	  else if (hard_regno < 0)
	    {
	      /* Spilled here: staying in memory in the subloop saves
		 the load/store at loop entry/exit.  */
	      ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
		-= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
		    + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
	    }
	  else
	    {
	      /* Got a hard register: bias the subloop allocno towards
		 the same register and account for the memory moves
		 that would be needed if it were spilled instead.  */
	      aclass = ALLOCNO_CLASS (subloop_allocno);
	      ira_init_register_move_cost_if_necessary (mode);
	      cost = (ira_register_move_cost[mode][rclass][rclass]
		      * (exit_freq + enter_freq));
	      ira_allocate_and_set_or_copy_costs
		(&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
		 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
		 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
	      ira_allocate_and_set_or_copy_costs
		(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
		 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
	      ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
	      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
		-= cost;
	      if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
		  > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
		ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
		  = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
	      ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
		+= (ira_memory_move_cost[mode][rclass][0] * enter_freq
		    + ira_memory_move_cost[mode][rclass][1] * exit_freq);
	    }
	}
    }
  /* Detach and free the per-pass color data.  */
  ira_free (allocno_color_data);
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = NULL;
    }
}
/* Initialize the common data for coloring and calls functions to do
   Chaitin-Briggs and regional coloring.  Drives color_pass over the
   whole loop tree, top-down.  */
static void
do_coloring (void)
{
  coloring_allocno_bitmap = ira_allocate_bitmap ();
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");

  /* Color each region; color_pass is invoked once per loop node.  */
  ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);

  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    ira_print_disposition (ira_dump_file);

  ira_free_bitmap (coloring_allocno_bitmap);
}
/* Move spill/restore code, which are to be generated in ira-emit.c,
   to less frequent points (if it is profitable) by reassigning some
   allocnos (in loop with subloops containing in another loop) to
   memory which results in longer live-range where the corresponding
   pseudo-registers will be in memory.  Iterates to a fixed point:
   each pass spills any allocno for which the net cost of keeping it
   in a hard register at this loop level is negative.  */
static void
move_spill_restore (void)
{
  int cost, regno, hard_regno, hard_regno2, index;
  bool changed_p;
  int enter_freq, exit_freq;
  machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t a, parent_allocno, subloop_allocno;
  ira_loop_tree_node_t parent, loop_node, subloop_node;
  ira_allocno_iterator ai;

  for (;;)
    {
      changed_p = false;
      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "New iteration of spill/restore move\n");
      FOR_EACH_ALLOCNO (a, ai)
	{
	  regno = ALLOCNO_REGNO (a);
	  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
	  /* Only consider border allocnos of non-leaf loops that got
	     a hard register and are neither caps nor cap members.  */
	  if (ALLOCNO_CAP_MEMBER (a) != NULL
	      || ALLOCNO_CAP (a) != NULL
	      || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
	      || loop_node->children == NULL
	      /* don't do the optimization because it can create
		 copies and the reload pass can spill the allocno set
		 by copy although the allocno will not get memory
		 slot.  */
	      || ira_equiv_no_lvalue_p (regno)
	      || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
	    continue;
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  index = ira_class_hard_reg_index[rclass][hard_regno];
	  ira_assert (index >= 0);
	  /* Benefit of keeping A in its hard register at this level.  */
	  cost = (ALLOCNO_MEMORY_COST (a)
		  - (ALLOCNO_HARD_REG_COSTS (a) == NULL
		     ? ALLOCNO_CLASS_COST (a)
		     : ALLOCNO_HARD_REG_COSTS (a)[index]));
	  ira_init_register_move_cost_if_necessary (mode);
	  for (subloop_node = loop_node->subloops;
	       subloop_node != NULL;
	       subloop_node = subloop_node->subloop_next)
	    {
	      ira_assert (subloop_node->bb == NULL);
	      subloop_allocno = subloop_node->regno_allocno_map[regno];
	      if (subloop_allocno == NULL)
		continue;
	      ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
	      /* We have accumulated cost.  To get the real cost of
		 allocno usage in the loop we should subtract costs of
		 the subloop allocnos.  */
	      cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
		       - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
			  ? ALLOCNO_CLASS_COST (subloop_allocno)
			  : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
	      exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
		/* Subloop allocno is in memory: spilling A removes the
		   store/load on the subloop borders.  */
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  cost
		    += (ira_memory_move_cost[mode][rclass][0] * exit_freq
			+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  /* Likewise account for moves on the border with the parent
	     region.  */
	  if ((parent = loop_node->parent) != NULL
	      && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
	    {
	      ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
	      exit_freq = ira_loop_edge_freq (loop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (loop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  cost
		    += (ira_memory_move_cost[mode][rclass][1] * exit_freq
			+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  if (cost < 0)
	    {
	      /* Net loss from keeping A in a register: spill it.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf
		    (ira_dump_file,
		     "      Moving spill/restore for a%dr%d up from loop %d",
		     ALLOCNO_NUM (a), regno, loop_node->loop_num);
		  fprintf (ira_dump_file, " - profit %d\n", -cost);
		}
	      changed_p = true;
	    }
	}
      if (! changed_p)
	break;
    }
}
/* Update current hard reg costs and current conflict hard reg costs
   for allocno A.  It is done by processing its copies containing
   other allocnos already assigned: each such copy makes the partner's
   hard register cheaper for A by the copy frequency times the move
   cost.  */
static void
update_curr_costs (ira_allocno_t a)
{
  int i, hard_regno, cost;
  machine_mode mode;
  enum reg_class aclass, rclass;
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  ira_free_allocno_updated_costs (a);
  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  mode = ALLOCNO_MODE (a);
  ira_init_register_move_cost_if_necessary (mode);
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      /* A can be either side of the copy; find the partner and the
	 link to the next copy on A's list.  */
      if (cp->first == a)
	{
	  next_cp = cp->next_first_allocno_copy;
	  another_a = cp->second;
	}
      else if (cp->second == a)
	{
	  next_cp = cp->next_second_allocno_copy;
	  another_a = cp->first;
	}
      else
	gcc_unreachable ();
      /* Only partners already assigned a hard register in a class
	 intersecting A's class can bias A's costs.  */
      if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
	  || ! ALLOCNO_ASSIGNED_P (another_a)
	  || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
	continue;
      rclass = REGNO_REG_CLASS (hard_regno);
      i = ira_class_hard_reg_index[aclass][hard_regno];
      if (i < 0)
	continue;
      /* Move-cost direction depends on which side of the copy A is.  */
      cost = (cp->first == a
	      ? ira_register_move_cost[mode][rclass][aclass]
	      : ira_register_move_cost[mode][aclass][rclass]);
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
	 ALLOCNO_HARD_REG_COSTS (a));
      ira_allocate_and_set_or_copy_costs
	(&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
	 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
      ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
    }
}
/* Try to assign hard registers to the unassigned allocnos and
   allocnos conflicting with them or conflicting with allocnos whose
   regno >= START_REGNO.  The function is called after ira_flattening,
   so more allocnos (including ones created in ira-emit.c) will have a
   chance to get a hard register.  We use simple assignment algorithm
   based on priorities.  */
void
ira_reassign_conflict_allocnos (int start_regno)
{
  int i, allocnos_to_color_num;
  ira_allocno_t a;
  enum reg_class aclass;
  bitmap allocnos_to_color;
  ira_allocno_iterator ai;

  allocnos_to_color = ira_allocate_bitmap ();
  allocnos_to_color_num = 0;
  /* Collect candidates: unassigned allocnos, plus assigned allocnos
     that conflict with allocnos whose regno >= START_REGNO.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      if (! ALLOCNO_ASSIGNED_P (a)
	  && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
	{
	  if (ALLOCNO_CLASS (a) != NO_REGS)
	    sorted_allocnos[allocnos_to_color_num++] = a;
	  else
	    {
	      /* NO_REGS allocnos can only go to memory.  */
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	    }
	  bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
	}
      if (ALLOCNO_REGNO (a) < start_regno
	  || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      ira_assert (ira_reg_classes_intersect_p
			  [aclass][ALLOCNO_CLASS (conflict_a)]);
	      /* bitmap_set_bit returns false if the bit was already
		 set -- skip already collected conflicts.  */
	      if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
		continue;
	      sorted_allocnos[allocnos_to_color_num++] = conflict_a;
	    }
	}
    }
  ira_free_bitmap (allocnos_to_color);
  if (allocnos_to_color_num > 1)
    {
      setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
      qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
	     allocno_priority_compare_func);
    }
  /* Un-assign all candidates and refresh their costs from copies
     before trying again.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      ALLOCNO_ASSIGNED_P (a) = false;
      update_curr_costs (a);
    }
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      if (assign_hard_reg (a, true))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file,
	       "      Secondary allocation: assign hard reg %d to reg %d\n",
	       ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
	}
    }
}
3619 /* This page contains functions used to find conflicts using allocno
3620 live ranges. */
#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
   intersect.  This should be used when there is only one region.
   Currently this is used during reload.  */
static bool
conflict_by_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
	      && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Reg info calculated by dataflow infrastructure can be different
     from one calculated by regclass; either pseudo may lack an
     allocno, in which case there is no conflict to report.  */
  a1 = ira_loop_tree_root->regno_allocno_map[regno1];
  if (a1 == NULL)
    return false;
  a2 = ira_loop_tree_root->regno_allocno_map[regno2];
  if (a2 == NULL)
    return false;
  return allocnos_conflict_by_live_ranges_p (a1, a2);
}

#endif
3646 /* This page contains code to coalesce memory stack slots used by
3647 spilled allocnos. This results in smaller stack frame, better data
3648 locality, and in smaller code for some architectures like
3649 x86/x86_64 where insn size depends on address displacement value.
3650 On the other hand, it can worsen insn scheduling after the RA but
3651 in practice it is less important than smaller stack frames. */
3653 /* TRUE if we coalesced some allocnos. In other words, if we got
3654 loops formed by members first_coalesced_allocno and
3655 next_coalesced_allocno containing more one allocno. */
3656 static bool allocno_coalesced_p;
3658 /* Bitmap used to prevent a repeated allocno processing because of
3659 coalescing. */
3660 static bitmap processed_coalesced_allocno_bitmap;
/* See below.  */
typedef struct coalesce_data *coalesce_data_t;

/* To decrease footprint of ira_allocno structure we store all data
   needed only for coalescing in the following structure.  */
struct coalesce_data
{
  /* Coalesced allocnos form a cyclic list.  One allocno given by
     FIRST represents all coalesced allocnos.  The
     list is chained by NEXT.  */
  ira_allocno_t first;
  ira_allocno_t next;
  /* Scratch field used by the stack-slot sharing code (e.g. to hold
     a slot number).  */
  int temp;
};

/* Container for storing allocno data concerning coalescing.  */
static coalesce_data_t allocno_coalesce_data;

/* Macro to access the data concerning coalescing.  */
#define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging A2 set into A1
   set).  The two cyclic lists are spliced into one, and every member
   of A2's set gets A1's representative as its FIRST.  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_COALESCE_DATA (a1)->first;
  a = ALLOCNO_COALESCE_DATA (a2)->first;
  /* Already in the same set -- nothing to do.  */
  if (first == a)
    return;
  /* Walk A2's cycle, repointing each member's FIRST at A1's
     representative; LAST ends up as the element just before A2.  */
  for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      ALLOCNO_COALESCE_DATA (a)->first = first;
      if (a == a2)
	break;
      last = a;
    }
  /* Splice A2's cycle into FIRST's cycle right after FIRST.  */
  next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
  allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
  allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
}
/* Return TRUE if there are conflicting allocnos from two sets of
   coalesced allocnos given correspondingly by allocnos A1 and A2.  We
   use live ranges to find conflicts because conflicts are represented
   only for allocnos of the same allocno class and during the reload
   pass we coalesce allocnos for sharing stack memory slots.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  if (allocno_coalesced_p)
    {
      /* Mark A1's set members in the scratch bitmap.  NOTE(review):
	 the bitmap appears to guard against repeated processing when
	 real coalescing has happened; with singleton sets the pairwise
	 walk below suffices.  */
      bitmap_clear (processed_coalesced_allocno_bitmap);
      for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
	  if (a == a1)
	    break;
	}
    }
  /* Check every member of A2's set against every member of A1's set;
     any live-range intersection means the sets conflict.  */
  for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
/* The major function for aggressive allocno coalescing.  We coalesce
   only spilled allocnos.  If some allocnos have been coalesced, we
   set up flag allocno_coalesced_p.  Copies are processed most
   frequent first; after each merge the remaining copy list is
   re-filtered to drop copies whose endpoints are now in the same
   set.  */
static void
coalesce_allocnos (void)
{
  ira_allocno_t a;
  ira_copy_t cp, next_cp;
  unsigned int j;
  int i, n, cp_num, regno;
  bitmap_iterator bi;

  cp_num = 0;
  /* Collect copies.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      regno = ALLOCNO_REGNO (a);
      /* Only spilled allocnos without register equivalences are
	 candidates for sharing a stack slot.  */
      if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
	  || ira_equiv_no_lvalue_p (regno))
	continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
	{
	  if (cp->first == a)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      regno = ALLOCNO_REGNO (cp->second);
	      /* For priority coloring we coalesce allocnos only with
		 the same allocno class not with intersected allocno
		 classes as it were possible.  It is done for
		 simplicity.  */
	      if ((cp->insn != NULL || cp->constraint_p)
		  && ALLOCNO_ASSIGNED_P (cp->second)
		  && ALLOCNO_HARD_REGNO (cp->second) < 0
		  && ! ira_equiv_no_lvalue_p (regno))
		sorted_copies[cp_num++] = cp;
	    }
	  else if (cp->second == a)
	    /* Each copy is visited from both endpoints; only record
	       it when A is the first endpoint.  */
	    next_cp = cp->next_second_allocno_copy;
	  else
	    gcc_unreachable ();
	}
    }
  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Coalesced copies, most frequently executed first.  */
  for (; cp_num != 0;)
    {
      /* Find the first (hottest) copy whose endpoints' sets do not
	 conflict and merge them.  */
      for (i = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (! coalesced_allocno_conflict_p (cp->first, cp->second))
	    {
	      allocno_coalesced_p = true;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		fprintf
		  (ira_dump_file,
		   "      Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
		   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
		   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
		   cp->freq);
	      merge_allocnos (cp->first, cp->second);
	      i++;
	      break;
	    }
	}
      /* Collect the rest of copies.  */
      for (n = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
	      != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
	    sorted_copies[n++] = cp;
	}
      cp_num = n;
    }
}
3823 /* Usage cost and order number of coalesced allocno set to which
3824 given pseudo register belongs to. */
3825 static int *regno_coalesced_allocno_cost;
3826 static int *regno_coalesced_allocno_num;
3828 /* Sort pseudos according frequencies of coalesced allocno sets they
3829 belong to (putting most frequently ones first), and according to
3830 coalesced allocno set order numbers. */
3831 static int
3832 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
3834 const int regno1 = *(const int *) v1p;
3835 const int regno2 = *(const int *) v2p;
3836 int diff;
3838 if ((diff = (regno_coalesced_allocno_cost[regno2]
3839 - regno_coalesced_allocno_cost[regno1])) != 0)
3840 return diff;
3841 if ((diff = (regno_coalesced_allocno_num[regno1]
3842 - regno_coalesced_allocno_num[regno2])) != 0)
3843 return diff;
3844 return regno1 - regno2;
3847 /* Widest width in which each pseudo reg is referred to (via subreg).
3848 It is used for sorting pseudo registers. */
3849 static unsigned int *regno_max_ref_width;
/* Sort pseudos according their slot numbers (putting ones with
   smaller numbers first, or last when the frame pointer is not
   needed).  Pseudos that kept a hard register (or have no allocno)
   sort after all spilled ones; among spilled ones with the same slot,
   wider references come first.  */
static int
coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  ira_allocno_t a1 = ira_regno_allocno_map[regno1];
  ira_allocno_t a2 = ira_regno_allocno_map[regno2];
  int diff, slot_num1, slot_num2;
  int total_size1, total_size2;

  /* Non-spilled pseudos go last, ordered among themselves by regno.  */
  if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
    {
      if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
	return regno1 - regno2;
      return 1;
    }
  else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
    return -1;
  /* For spilled allocnos ALLOCNO_HARD_REGNO holds the negated slot
     number.  */
  slot_num1 = -ALLOCNO_HARD_REGNO (a1);
  slot_num2 = -ALLOCNO_HARD_REGNO (a2);
  if ((diff = slot_num1 - slot_num2) != 0)
    /* The direction depends on how the frame grows relative to the
       stack (target macros); reverse when they disagree and no frame
       pointer is needed.  */
    return (frame_pointer_needed
	    || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
  /* Same slot: larger referenced size first.  */
  total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
		     regno_max_ref_width[regno1]);
  total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
		     regno_max_ref_width[regno2]);
  if ((diff = total_size2 - total_size1) != 0)
    return diff;
  return regno1 - regno2;
}
/* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
   for coalesced allocno sets containing allocnos with their regnos
   given in array PSEUDO_REGNOS of length N.  Each set gets one order
   number and a cost equal to the sum of its members' frequencies.  */
static void
setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
{
  int i, num, regno, cost;
  ira_allocno_t allocno, a;

  for (num = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      if (allocno == NULL)
	{
	  /* Pseudo without an allocno: give it a zero cost and its
	     own order number.  */
	  regno_coalesced_allocno_cost[regno] = 0;
	  regno_coalesced_allocno_num[regno] = ++num;
	  continue;
	}
      /* Process each coalesce set once, via its representative.  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
	continue;
      num++;
      /* First cycle walk: accumulate the set's total frequency.  */
      for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  cost += ALLOCNO_FREQ (a);
	  if (a == allocno)
	    break;
	}
      /* Second cycle walk: record NUM and COST for every member.  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
	  regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
	  if (a == allocno)
	    break;
	}
    }
}
/* Collect spilled allocnos representing coalesced allocno sets (the
   first coalesced allocno).  The collected allocnos are returned
   through array SPILLED_COALESCED_ALLOCNOS.  The function returns the
   number of the collected allocnos.  The allocnos are given by their
   regnos in array PSEUDO_REGNOS of length N.  */
static int
collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
				    ira_allocno_t *spilled_coalesced_allocnos)
{
  int i, num, regno;
  ira_allocno_t allocno;

  for (num = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      /* Skip regnos without allocnos, allocnos that got a hard
	 register (not spilled), and non-representative members of a
	 coalesced set.  */
      if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
	  || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
	continue;
      spilled_coalesced_allocnos[num++] = allocno;
    }
  return num;
}
/* Array of live ranges of size IRA_ALLOCNOS_NUM.  Live range for
   given slot contains live ranges of coalesced allocnos assigned to
   given slot.  Allocated, filled and freed by coalesce_spill_slots
   (see also setup_slot_coalesced_allocno_live_ranges).  */
static live_range_t *slot_coalesced_allocnos_live_ranges;
/* Return TRUE if coalesced allocnos represented by ALLOCNO has live
   ranges intersected with live ranges of coalesced allocnos assigned
   to slot with number N.  */
static bool
slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
{
  ira_allocno_t a;

  /* Walk the circular list of allocnos coalesced with ALLOCNO; the
     walk terminates when it returns to ALLOCNO itself.  */
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int i;
      int nr = ALLOCNO_NUM_OBJECTS (a);

      /* Check every object of the allocno against the accumulated
	 live ranges of slot N.  */
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);

	  if (ira_live_ranges_intersect_p
	      (slot_coalesced_allocnos_live_ranges[n],
	       OBJECT_LIVE_RANGES (obj)))
	    return true;
	}
      if (a == allocno)
	break;
    }
  return false;
}
/* Update live ranges of slot to which coalesced allocnos represented
   by ALLOCNO were assigned.  The slot number is taken from the
   representative's TEMP field; each member object's live ranges are
   copied and merged into the slot's accumulated range list.  */
static void
setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
{
  int i, n;
  ira_allocno_t a;
  live_range_t r;

  n = ALLOCNO_COALESCE_DATA (allocno)->temp;
  /* Circular walk over all allocnos coalesced with ALLOCNO.  */
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);

	  /* Copy before merging: ira_merge_live_ranges consumes its
	     arguments, and the object still owns its own list.  */
	  r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
	  slot_coalesced_allocnos_live_ranges[n]
	    = ira_merge_live_ranges
	      (slot_coalesced_allocnos_live_ranges[n], r);
	}
      if (a == allocno)
	break;
    }
}
/* We have coalesced allocnos involving in copies.  Coalesce allocnos
   further in order to share the same memory stack slot.  Allocnos
   representing sets of allocnos coalesced before the call are given
   in array SPILLED_COALESCED_ALLOCNOS of length NUM.  Return TRUE if
   some allocnos were coalesced in the function.  */
static bool
coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
  int i, j, n, last_coalesced_allocno_num;
  ira_allocno_t allocno, a;
  bool merged_p = false;
  bitmap set_jump_crosses = regstat_get_setjmp_crosses ();

  slot_coalesced_allocnos_live_ranges
    = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
  memset (slot_coalesced_allocnos_live_ranges, 0,
	  sizeof (live_range_t) * ira_allocnos_num);
  last_coalesced_allocno_num = 0;
  /* Coalesce non-conflicting spilled allocnos preferring most
     frequently used.  */
  for (i = 0; i < num; i++)
    {
      allocno = spilled_coalesced_allocnos[i];
      /* Skip non-representatives, pseudos crossing setjmp, and
	 pseudos with equivalences that must not get an lvalue.  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
	  || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
	  || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
	continue;
      /* Look among already-placed sets (indexes < i, i.e. more
	 frequent ones) for one whose slot's live ranges do not
	 intersect ours.  */
      for (j = 0; j < i; j++)
	{
	  a = spilled_coalesced_allocnos[j];
	  n = ALLOCNO_COALESCE_DATA (a)->temp;
	  if (ALLOCNO_COALESCE_DATA (a)->first == a
	      && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
	      && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
	      && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
	    break;
	}
      if (j >= i)
	{
	  /* No coalescing: set up number for coalesced allocnos
	     represented by ALLOCNO.  */
	  ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
	  setup_slot_coalesced_allocno_live_ranges (allocno);
	}
      else
	{
	  allocno_coalesced_p = true;
	  merged_p = true;
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file,
		     " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
		     ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
		     ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	  /* Share A's slot number, merge our live ranges into that
	     slot, and merge the coalesce sets.  */
	  ALLOCNO_COALESCE_DATA (allocno)->temp
	    = ALLOCNO_COALESCE_DATA (a)->temp;
	  setup_slot_coalesced_allocno_live_ranges (allocno);
	  merge_allocnos (a, allocno);
	  ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
	}
    }
  for (i = 0; i < ira_allocnos_num; i++)
    ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
  ira_free (slot_coalesced_allocnos_live_ranges);
  return merged_p;
}
/* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
   subsequent assigning stack slots to them in the reload pass.  To do
   this we coalesce spilled allocnos first to decrease the number of
   memory-memory move insns.  This function is called by the
   reload.  */
void
ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
			       unsigned int *reg_max_ref_width)
{
  int max_regno = max_reg_num ();
  int i, regno, num, slot_num;
  ira_allocno_t allocno, a;
  ira_allocno_iterator ai;
  ira_allocno_t *spilled_coalesced_allocnos;

  /* This IRA-side slot assignment is only used without LRA.  */
  ira_assert (! ira_use_lra_p);

  /* Set up allocnos can be coalesced.  */
  coloring_allocno_bitmap = ira_allocate_bitmap ();
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      if (allocno != NULL)
	bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
    }
  allocno_coalesced_p = false;
  processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
  allocno_coalesce_data
    = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
				      * ira_allocnos_num);
  /* Initialize coalesce data for allocnos: each allocno starts as a
     singleton circular set.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
      ALLOCNO_COALESCE_DATA (a)->first = a;
      ALLOCNO_COALESCE_DATA (a)->next = a;
    }
  coalesce_allocnos ();
  ira_free_bitmap (coloring_allocno_bitmap);
  regno_coalesced_allocno_cost
    = (int *) ira_allocate (max_regno * sizeof (int));
  regno_coalesced_allocno_num
    = (int *) ira_allocate (max_regno * sizeof (int));
  memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
  setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
  /* Sort regnos according frequencies of the corresponding coalesced
     allocno sets.  */
  qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
  spilled_coalesced_allocnos
    = (ira_allocno_t *) ira_allocate (ira_allocnos_num
				      * sizeof (ira_allocno_t));
  /* Collect allocnos representing the spilled coalesced allocno
     sets.  */
  num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
					    spilled_coalesced_allocnos);
  if (flag_ira_share_spill_slots
      && coalesce_spill_slots (spilled_coalesced_allocnos, num))
    {
      /* Slot sharing merged some sets, so the per-regno numbers,
	 costs, order and representatives must all be recomputed.  */
      setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
      qsort (pseudo_regnos, n, sizeof (int),
	     coalesced_pseudo_reg_freq_compare);
      num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
						spilled_coalesced_allocnos);
    }
  ira_free_bitmap (processed_coalesced_allocno_bitmap);
  allocno_coalesced_p = false;
  /* Assign stack slot numbers to spilled allocno sets, use smaller
     numbers for most frequently used coalesced allocnos.  -1 is
     reserved for dynamic search of stack slots for pseudos spilled by
     the reload.  */
  slot_num = 1;
  for (i = 0; i < num; i++)
    {
      allocno = spilled_coalesced_allocnos[i];
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
	  || ALLOCNO_HARD_REGNO (allocno) >= 0
	  || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
	continue;
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
      slot_num++;
      /* Record the slot as a negative hard regno in every member of
	 the coalesced set (circular list walk).  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
	  ALLOCNO_HARD_REGNO (a) = -slot_num;
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, " a%dr%d(%d,%d)",
		     ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
		     MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
			  reg_max_ref_width[ALLOCNO_REGNO (a)]));
	  if (a == allocno)
	    break;
	}
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "\n");
    }
  ira_spilled_reg_stack_slots_num = slot_num - 1;
  ira_free (spilled_coalesced_allocnos);
  /* Sort regnos according the slot numbers.  */
  regno_max_ref_width = reg_max_ref_width;
  qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = NULL;
  ira_free (allocno_coalesce_data);
  ira_free (regno_coalesced_allocno_num);
  ira_free (regno_coalesced_allocno_cost);
}
4191 /* This page contains code used by the reload pass to improve the
4192 final code. */
4194 /* The function is called from reload to mark changes in the
4195 allocation of REGNO made by the reload. Remember that reg_renumber
4196 reflects the change result. */
4197 void
4198 ira_mark_allocation_change (int regno)
4200 ira_allocno_t a = ira_regno_allocno_map[regno];
4201 int old_hard_regno, hard_regno, cost;
4202 enum reg_class aclass = ALLOCNO_CLASS (a);
4204 ira_assert (a != NULL);
4205 hard_regno = reg_renumber[regno];
4206 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
4207 return;
4208 if (old_hard_regno < 0)
4209 cost = -ALLOCNO_MEMORY_COST (a);
4210 else
4212 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
4213 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
4214 ? ALLOCNO_CLASS_COST (a)
4215 : ALLOCNO_HARD_REG_COSTS (a)
4216 [ira_class_hard_reg_index[aclass][old_hard_regno]]);
4217 update_costs_from_copies (a, false, false);
4219 ira_overall_cost -= cost;
4220 ALLOCNO_HARD_REGNO (a) = hard_regno;
4221 if (hard_regno < 0)
4223 ALLOCNO_HARD_REGNO (a) = -1;
4224 cost += ALLOCNO_MEMORY_COST (a);
4226 else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
4228 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
4229 ? ALLOCNO_CLASS_COST (a)
4230 : ALLOCNO_HARD_REG_COSTS (a)
4231 [ira_class_hard_reg_index[aclass][hard_regno]]);
4232 update_costs_from_copies (a, true, false);
4234 else
4235 /* Reload changed class of the allocno. */
4236 cost = 0;
4237 ira_overall_cost += cost;
4240 /* This function is called when reload deletes memory-memory move. In
4241 this case we marks that the allocation of the corresponding
4242 allocnos should be not changed in future. Otherwise we risk to get
4243 a wrong code. */
4244 void
4245 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4247 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4248 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4250 ira_assert (dst != NULL && src != NULL
4251 && ALLOCNO_HARD_REGNO (dst) < 0
4252 && ALLOCNO_HARD_REGNO (src) < 0);
4253 ALLOCNO_DONT_REASSIGN_P (dst) = true;
4254 ALLOCNO_DONT_REASSIGN_P (src) = true;
/* Try to assign a hard register (except for FORBIDDEN_REGS) to
   allocno A and return TRUE in the case of success.  */
static bool
allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
{
  int hard_regno;
  enum reg_class aclass;
  int regno = ALLOCNO_REGNO (a);
  /* Saved per-object conflict sets; assumes ALLOCNO_NUM_OBJECTS (a)
     is at most 2 — TODO confirm against ira-int invariants.  */
  HARD_REG_SET saved[2];
  int i, n;

  /* Temporarily add FORBIDDEN_REGS (and call-used regs when caller
     saves are disabled and A crosses calls) to each object's total
     conflict set; the originals are restored before returning.  */
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
      if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  call_used_reg_set);
    }
  ALLOCNO_ASSIGNED_P (a) = false;
  aclass = ALLOCNO_CLASS (a);
  update_curr_costs (a);
  assign_hard_reg (a, true);
  hard_regno = ALLOCNO_HARD_REGNO (a);
  reg_renumber[regno] = hard_regno;
  if (hard_regno < 0)
    ALLOCNO_HARD_REGNO (a) = -1;
  else
    {
      /* Account for the memory->register improvement in the overall
	 cost.  */
      ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
      ira_overall_cost
	-= (ALLOCNO_MEMORY_COST (a)
	    - (ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
					    [aclass][hard_regno]]));
      if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
	  && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
					      call_used_reg_set))
	{
	  ira_assert (flag_caller_saves);
	  caller_save_needed = 1;
	}
    }

  /* If we found a hard register, modify the RTL for the pseudo
     register to show the hard register, and mark the pseudo register
     live.  */
  if (reg_renumber[regno] >= 0)
    {
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
      SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
      mark_home_live (regno);
    }
  else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "\n");
  /* Restore the original conflict sets.  */
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
    }
  return reg_renumber[regno] >= 0;
}
/* qsort comparison function: order pseudos by decreasing usage
   frequency, breaking ties by ascending regno so the sort is
   stable across runs.  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  const int freq_diff = REG_FREQ (regno2) - REG_FREQ (regno1);

  return freq_diff != 0 ? freq_diff : regno1 - regno2;
}
/* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
   NUM of them) or spilled pseudos conflicting with pseudos in
   SPILLED_PSEUDO_REGS.  Return TRUE and update SPILLED, if the
   allocation has been changed.  The function doesn't use
   BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
   PSEUDO_PREVIOUS_REGS for the corresponding pseudos.  The function
   is called by the reload pass at the end of each reload
   iteration.  */
bool
ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
		      HARD_REG_SET bad_spill_regs,
		      HARD_REG_SET *pseudo_forbidden_regs,
		      HARD_REG_SET *pseudo_previous_regs,
		      bitmap spilled)
{
  int i, n, regno;
  bool changed_p;
  ira_allocno_t a;
  HARD_REG_SET forbidden_regs;
  bitmap temp = BITMAP_ALLOC (NULL);

  /* Add pseudos which conflict with pseudos already in
     SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS.  This is preferable
     to allocating in two steps as some of the conflicts might have
     a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS.  */
  for (i = 0; i < num; i++)
    bitmap_set_bit (temp, spilled_pseudo_regs[i]);

  /* Note: NUM grows inside this loop, but the loop bound N is the
     original count, so only the initially-passed pseudos are scanned
     for conflicts.  */
  for (i = 0, n = num; i < n; i++)
    {
      int nr, j;
      int regno = spilled_pseudo_regs[i];
      bitmap_set_bit (temp, regno);
      a = ira_regno_allocno_map[regno];
      nr = ALLOCNO_NUM_OBJECTS (a);
      for (j = 0; j < nr; j++)
	{
	  ira_object_t conflict_obj;
	  ira_object_t obj = ALLOCNO_OBJECT (a, j);
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	      /* bitmap_set_bit returning true means the regno was not
		 yet in TEMP, so each conflict is appended once.  */
	      if (ALLOCNO_HARD_REGNO (conflict_a) < 0
		  && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
		  && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
		{
		  spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
		  /* ?!? This seems wrong.  */
		  bitmap_set_bit (consideration_allocno_bitmap,
				  ALLOCNO_NUM (conflict_a));
		}
	    }
	}
    }
  if (num > 1)
    qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
  changed_p = false;
  /* Try to assign hard registers to pseudos from
     SPILLED_PSEUDO_REGS.  */
  for (i = 0; i < num; i++)
    {
      regno = spilled_pseudo_regs[i];
      COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
      IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
      IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
      gcc_assert (reg_renumber[regno] < 0);
      a = ira_regno_allocno_map[regno];
      ira_mark_allocation_change (regno);
      ira_assert (reg_renumber[regno] < 0);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file,
		 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
		 ALLOCNO_MEMORY_COST (a)
		 - ALLOCNO_CLASS_COST (a));
      allocno_reload_assign (a, forbidden_regs);
      if (reg_renumber[regno] >= 0)
	{
	  CLEAR_REGNO_REG_SET (spilled, regno);
	  changed_p = true;
	}
    }
  BITMAP_FREE (temp);
  return changed_p;
}
/* The function is called by reload and returns already allocated
   stack slot (if any) for REGNO with given INHERENT_SIZE and
   TOTAL_SIZE.  In the case of failure to find a slot which can be
   used for REGNO, the function returns NULL.  */
rtx
ira_reuse_stack_slot (int regno, unsigned int inherent_size,
		      unsigned int total_size)
{
  unsigned int i;
  int slot_num, best_slot_num;
  int cost, best_cost;
  ira_copy_t cp, next_cp;
  ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
  rtx x;
  bitmap_iterator bi;
  struct ira_spilled_reg_stack_slot *slot = NULL;

  ira_assert (! ira_use_lra_p);

  ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
	      && inherent_size <= total_size
	      && ALLOCNO_HARD_REGNO (allocno) < 0);
  if (! flag_ira_share_spill_slots)
    return NULL_RTX;
  /* Slot numbers are encoded as -(slot_num + 2) in the hard regno
     field; -1 means no slot was assigned by IRA.  */
  slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
  if (slot_num != -1)
    {
      slot = &ira_spilled_reg_stack_slots[slot_num];
      x = slot->mem;
    }
  else
    {
      best_cost = best_slot_num = -1;
      x = NULL_RTX;
      /* It means that the pseudo was spilled in the reload pass, try
	 to reuse a slot.  */
      for (slot_num = 0;
	   slot_num < ira_spilled_reg_stack_slots_num;
	   slot_num++)
	{
	  slot = &ira_spilled_reg_stack_slots[slot_num];
	  if (slot->mem == NULL_RTX)
	    continue;
	  /* The slot must be big enough in both width and mode.  */
	  if (slot->width < total_size
	      || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
	    continue;
	  /* Reject the slot if any pseudo already in it conflicts
	     with REGNO by live ranges.  */
	  EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
				    FIRST_PSEUDO_REGISTER, i, bi)
	    {
	      another_allocno = ira_regno_allocno_map[i];
	      if (allocnos_conflict_by_live_ranges_p (allocno,
						      another_allocno))
		goto cont;
	    }
	  /* Score the slot by the total frequency of copies between
	     REGNO's allocno and pseudos already sharing the slot.  */
	  for (cost = 0, cp = ALLOCNO_COPIES (allocno);
	       cp != NULL;
	       cp = next_cp)
	    {
	      if (cp->first == allocno)
		{
		  next_cp = cp->next_first_allocno_copy;
		  another_allocno = cp->second;
		}
	      else if (cp->second == allocno)
		{
		  next_cp = cp->next_second_allocno_copy;
		  another_allocno = cp->first;
		}
	      else
		gcc_unreachable ();
	      if (cp->insn == NULL_RTX)
		continue;
	      if (bitmap_bit_p (&slot->spilled_regs,
				ALLOCNO_REGNO (another_allocno)))
		cost += cp->freq;
	    }
	  if (cost > best_cost)
	    {
	      best_cost = cost;
	      best_slot_num = slot_num;
	    }
	cont:
	  ;
	}
      if (best_cost >= 0)
	{
	  slot_num = best_slot_num;
	  slot = &ira_spilled_reg_stack_slots[slot_num];
	  SET_REGNO_REG_SET (&slot->spilled_regs, regno);
	  x = slot->mem;
	  ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
	}
    }
  if (x != NULL_RTX)
    {
      ira_assert (slot->width >= total_size);
#ifdef ENABLE_IRA_CHECKING
      EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
				FIRST_PSEUDO_REGISTER, i, bi)
	{
	  ira_assert (! conflict_by_live_ranges_p (regno, i));
	}
#endif
      SET_REGNO_REG_SET (&slot->spilled_regs, regno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file)
	{
	  fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
		   regno, REG_FREQ (regno), slot_num);
	  EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
				    FIRST_PSEUDO_REGISTER, i, bi)
	    {
	      if ((unsigned) regno != i)
		fprintf (ira_dump_file, " %d", i);
	    }
	  fprintf (ira_dump_file, "\n");
	}
    }
  return x;
}
/* This is called by reload every time a new stack slot X with
   TOTAL_SIZE was allocated for REGNO.  We store this info for
   subsequent ira_reuse_stack_slot calls.  */
void
ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
{
  struct ira_spilled_reg_stack_slot *slot;
  int slot_num;
  ira_allocno_t allocno;

  ira_assert (! ira_use_lra_p);

  ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
  allocno = ira_regno_allocno_map[regno];
  /* Decode the slot number from the -(slot_num + 2) encoding; -1
     means the allocno has no IRA-assigned slot yet, so take a fresh
     one and record it back in the same encoding.  */
  slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
  if (slot_num == -1)
    {
      slot_num = ira_spilled_reg_stack_slots_num++;
      ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
    }
  /* (Re)initialize the slot: REGNO is its sole occupant so far.  */
  slot = &ira_spilled_reg_stack_slots[slot_num];
  INIT_REG_SET (&slot->spilled_regs);
  SET_REGNO_REG_SET (&slot->spilled_regs, regno);
  slot->mem = x;
  slot->width = total_size;
  if (internal_flag_ira_verbose > 3 && ira_dump_file)
    fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
	     regno, REG_FREQ (regno), slot_num);
}
/* Return spill cost for pseudo-registers whose numbers are in array
   REGNOS (with a negative number as an end marker) for reload with
   given IN and OUT for INSN.  Return also number points (through
   EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
   the register pressure is high, number of references of the
   pseudo-registers (through NREFS), number of callee-clobbered
   hard-registers occupied by the pseudo-registers (through
   CALL_USED_COUNT), and the first hard regno occupied by the
   pseudo-registers (through FIRST_HARD_REGNO).  */
static int
calculate_spill_cost (int *regnos, rtx in, rtx out, rtx_insn *insn,
		      int *excess_pressure_live_length,
		      int *nrefs, int *call_used_count, int *first_hard_regno)
{
  int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
  bool in_p, out_p;
  int length;
  ira_allocno_t a;

  *nrefs = 0;
  for (length = count = cost = i = 0;; i++)
    {
      regno = regnos[i];
      if (regno < 0)	/* Negative regno terminates the array.  */
	break;
      *nrefs += REG_N_REFS (regno);
      hard_regno = reg_renumber[regno];
      ira_assert (hard_regno >= 0);
      a = ira_regno_allocno_map[regno];
      length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
      cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
      /* COUNT pseudos whose every covered hard register is
	 call-used (callee-clobbered).  */
      for (j = 0; j < nregs; j++)
	if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
	  break;
      if (j == nregs)
	count++;
      in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
      out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
      /* If the reload register dies in INSN, spilling this pseudo
	 saves the corresponding memory moves; credit that back.  */
      if ((in_p || out_p)
	  && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
	{
	  saved_cost = 0;
	  if (in_p)
	    saved_cost += ira_memory_move_cost
			  [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
	  if (out_p)
	    saved_cost
	      += ira_memory_move_cost
		 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
	  cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
	}
    }
  *excess_pressure_live_length = length;
  *call_used_count = count;
  hard_regno = -1;
  if (regnos[0] >= 0)
    {
      hard_regno = reg_renumber[regnos[0]];
    }
  *first_hard_regno = hard_regno;
  return cost;
}
/* Return TRUE if spilling pseudo-registers whose numbers are in array
   REGNOS is better than spilling pseudo-registers with numbers in
   OTHER_REGNOS for reload with given IN and OUT for INSN.  The
   function used by the reload pass to make better register spilling
   decisions.  */
bool
ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
				 rtx in, rtx out, rtx_insn *insn)
{
  int cost, other_cost;
  int length, other_length;
  int nrefs, other_nrefs;
  int call_used_count, other_call_used_count;
  int hard_regno, other_hard_regno;

  cost = calculate_spill_cost (regnos, in, out, insn,
			       &length, &nrefs, &call_used_count, &hard_regno);
  other_cost = calculate_spill_cost (other_regnos, in, out, insn,
				     &other_length, &other_nrefs,
				     &other_call_used_count,
				     &other_hard_regno);
  /* Tie-break cascade: unreferenced pseudos first, then lower spill
     cost, then longer high-pressure live length, and finally a
     target-dependent criterion.  */
  if (nrefs == 0 && other_nrefs != 0)
    return true;
  if (nrefs != 0 && other_nrefs == 0)
    return false;
  if (cost != other_cost)
    return cost < other_cost;
  if (length != other_length)
    return length > other_length;
#ifdef REG_ALLOC_ORDER
  /* Prefer the hard register that comes earlier in the target's
     allocation order.  */
  if (hard_regno >= 0 && other_hard_regno >= 0)
    return (inv_reg_alloc_order[hard_regno]
	    < inv_reg_alloc_order[other_hard_regno]);
#else
  /* Prefer freeing more callee-clobbered registers.  */
  if (call_used_count != other_call_used_count)
    return call_used_count > other_call_used_count;
#endif
  return false;
}
4685 /* Allocate and initialize data necessary for assign_hard_reg. */
4686 void
4687 ira_initiate_assign (void)
4689 sorted_allocnos
4690 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4691 * ira_allocnos_num);
4692 consideration_allocno_bitmap = ira_allocate_bitmap ();
4693 initiate_cost_update ();
4694 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4695 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
4696 * sizeof (ira_copy_t));
4699 /* Deallocate data used by assign_hard_reg. */
4700 void
4701 ira_finish_assign (void)
4703 ira_free (sorted_allocnos);
4704 ira_free_bitmap (consideration_allocno_bitmap);
4705 finish_cost_update ();
4706 ira_free (allocno_priorities);
4707 ira_free (sorted_copies);
/* Entry function doing color-based register allocation.  */
static void
color (void)
{
  allocno_stack_vec.create (ira_allocnos_num);
  memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
  /* Assignment data must exist before coloring and is torn down
     right after it.  */
  ira_initiate_assign ();
  do_coloring ();
  ira_finish_assign ();
  allocno_stack_vec.release ();
  move_spill_restore ();
}
4727 /* This page contains a simple register allocator without usage of
4728 allocno conflicts. This is used for fast allocation for -O0. */
/* Do register allocation by not using allocno conflicts.  It uses
   only allocno live ranges.  The algorithm is close to Chow's
   priority coloring.  */
static void
fast_allocation (void)
{
  int i, j, k, num, class_size, hard_regno;
#ifdef STACK_REGS
  bool no_stack_reg_p;
#endif
  enum reg_class aclass;
  machine_mode mode;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  live_range_t r;
  /* USED_HARD_REGS[point] accumulates the hard regs occupied at each
     program point as allocnos are assigned.  */
  HARD_REG_SET conflict_hard_regs, *used_hard_regs;

  sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						    * ira_allocnos_num);
  num = 0;
  FOR_EACH_ALLOCNO (a, ai)
    sorted_allocnos[num++] = a;
  allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
  setup_allocno_priorities (sorted_allocnos, num);
  used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
						  * ira_max_point);
  for (i = 0; i < ira_max_point; i++)
    CLEAR_HARD_REG_SET (used_hard_regs[i]);
  /* Process allocnos in priority order.  */
  qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
	 allocno_priority_compare_func);
  for (i = 0; i < num; i++)
    {
      int nr, l;

      a = sorted_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      /* Gather all hard regs unusable for A: its explicit conflicts
	 plus every hard reg occupied at any point where A lives.  */
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      for (l = 0; l < nr; l++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, l);
	  IOR_HARD_REG_SET (conflict_hard_regs,
			    OBJECT_CONFLICT_HARD_REGS (obj));
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    for (j = r->start; j <= r->finish; j++)
	      IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
	}
      aclass = ALLOCNO_CLASS (a);
      ALLOCNO_ASSIGNED_P (a) = true;
      ALLOCNO_HARD_REGNO (a) = -1;
      /* Give up if the whole class is blocked.  */
      if (hard_reg_set_subset_p (reg_class_contents[aclass],
				 conflict_hard_regs))
	continue;
      mode = ALLOCNO_MODE (a);
#ifdef STACK_REGS
      no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
#endif
      class_size = ira_class_hard_regs_num[aclass];
      /* Take the first acceptable hard register of the class.  */
      for (j = 0; j < class_size; j++)
	{
	  hard_regno = ira_class_hard_regs[aclass][j];
#ifdef STACK_REGS
	  if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
	      && hard_regno <= LAST_STACK_REG)
	    continue;
#endif
	  if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
	      || (TEST_HARD_REG_BIT
		  (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
	    continue;
	  ALLOCNO_HARD_REGNO (a) = hard_regno;
	  /* Mark the chosen register occupied at every point where A
	     lives.  */
	  for (l = 0; l < nr; l++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, l);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		for (k = r->start; k <= r->finish; k++)
		  IOR_HARD_REG_SET (used_hard_regs[k],
				    ira_reg_mode_hard_regset[hard_regno][mode]);
	    }
	  break;
	}
    }
  ira_free (sorted_allocnos);
  ira_free (used_hard_regs);
  ira_free (allocno_priorities);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    ira_print_disposition (ira_dump_file);
}
4820 /* Entry function doing coloring. */
4821 void
4822 ira_color (void)
4824 ira_allocno_t a;
4825 ira_allocno_iterator ai;
4827 /* Setup updated costs. */
4828 FOR_EACH_ALLOCNO (a, ai)
4830 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
4831 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
4833 if (ira_conflicts_p)
4834 color ();
4835 else
4836 fast_allocation ();