Merge trunk version 193672 into gupc branch.
[official-gcc.git] / gcc / ira-color.c
blob33f9d24b00b52cab8e99c118fa9eff1b2968301f
1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "sbitmap.h"
32 #include "bitmap.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "reload.h"
38 #include "params.h"
39 #include "df.h"
40 #include "ira-int.h"
42 typedef struct allocno_hard_regs *allocno_hard_regs_t;
44 /* The structure contains information about hard registers can be
45 assigned to allocnos. Usually it is allocno profitable hard
46 registers but in some cases this set can be a bit different. Major
47 reason of the difference is a requirement to use hard register sets
48 that form a tree or a forest (set of trees), i.e. hard register set
49 of a node should contain hard register sets of its subnodes. */
50 struct allocno_hard_regs
52 /* Hard registers can be assigned to an allocno. */
53 HARD_REG_SET set;
54 /* Overall (spilling) cost of all allocnos with given register
55 set. */
56 HOST_WIDEST_INT cost;
59 typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
61 /* A node representing allocno hard registers. Such nodes form a
62 forest (set of trees). Each subnode of given node in the forest
63 refers for hard register set (usually allocno profitable hard
64 register set) which is a subset of one referred from given
65 node. */
66 struct allocno_hard_regs_node
68 /* Set up number of the node in preorder traversing of the forest. */
69 int preorder_num;
70 /* Used for different calculation like finding conflict size of an
71 allocno. */
72 int check;
73 /* Used for calculation of conflict size of an allocno. The
74 conflict size of the allocno is maximal number of given allocno
75 hard registers needed for allocation of the conflicting allocnos.
76 Given allocno is trivially colored if this number plus the number
77 of hard registers needed for given allocno is not greater than
78 the number of given allocno hard register set. */
79 int conflict_size;
80 /* The number of hard registers given by member hard_regs. */
81 int hard_regs_num;
82 /* The following member is used to form the final forest. */
83 bool used_p;
84 /* Pointer to the corresponding profitable hard registers. */
85 allocno_hard_regs_t hard_regs;
86 /* Parent, first subnode, previous and next node with the same
87 parent in the forest. */
88 allocno_hard_regs_node_t parent, first, prev, next;
91 /* To decrease footprint of ira_allocno structure we store all data
92 needed only for coloring in the following structure. */
93 struct allocno_color_data
95 /* TRUE value means that the allocno was not removed yet from the
96 conflicting graph during colouring. */
97 unsigned int in_graph_p : 1;
98 /* TRUE if it is put on the stack to make other allocnos
99 colorable. */
100 unsigned int may_be_spilled_p : 1;
101 /* TRUE if the allocno is trivially colorable. */
102 unsigned int colorable_p : 1;
103 /* Number of hard registers of the allocno class really
104 available for the allocno allocation. It is number of the
105 profitable hard regs. */
106 int available_regs_num;
107 /* Allocnos in a bucket (used in coloring) chained by the following
108 two members. */
109 ira_allocno_t next_bucket_allocno;
110 ira_allocno_t prev_bucket_allocno;
111 /* Used for temporary purposes. */
112 int temp;
113 /* Used to exclude repeated processing. */
114 int last_process;
115 /* Profitable hard regs available for this pseudo allocation. It
116 means that the set excludes unavailable hard regs and hard regs
117 conflicting with given pseudo. They should be of the allocno
118 class. */
119 HARD_REG_SET profitable_hard_regs;
120 /* The allocno hard registers node. */
121 allocno_hard_regs_node_t hard_regs_node;
122 /* Array of structures allocno_hard_regs_subnode representing
123 given allocno hard registers node (the 1st element in the array)
124 and all its subnodes in the tree (forest) of allocno hard
125 register nodes (see comments above). */
126 int hard_regs_subnodes_start;
127 /* The length of the previous array. */
128 int hard_regs_subnodes_num;
131 /* See above. */
132 typedef struct allocno_color_data *allocno_color_data_t;
134 /* Container for storing allocno data concerning coloring. */
135 static allocno_color_data_t allocno_color_data;
137 /* Macro to access the data concerning coloring. */
138 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
140 /* Used for finding allocno colorability to exclude repeated allocno
141 processing and for updating preferencing to exclude repeated
142 allocno processing during assignment. */
143 static int curr_allocno_process;
145 /* This file contains code for regional graph coloring, spill/restore
146 code placement optimization, and code helping the reload pass to do
147 a better job. */
149 /* Bitmap of allocnos which should be colored. */
150 static bitmap coloring_allocno_bitmap;
152 /* Bitmap of allocnos which should be taken into account during
153 coloring. In general case it contains allocnos from
154 coloring_allocno_bitmap plus other already colored conflicting
155 allocnos. */
156 static bitmap consideration_allocno_bitmap;
158 /* All allocnos sorted according their priorities. */
159 static ira_allocno_t *sorted_allocnos;
161 /* Vec representing the stack of allocnos used during coloring. */
162 static vec<ira_allocno_t> allocno_stack_vec;
164 /* Helper for qsort comparison callbacks - return a positive integer if
165 X > Y, or a negative value otherwise. Use a conditional expression
166 instead of a difference computation to insulate from possible overflow
167 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
168 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
172 /* Definition of vector of allocno hard registers. */
174 /* Vector of unique allocno hard registers. */
175 static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
177 /* Returns hash value for allocno hard registers V. */
178 static hashval_t
179 allocno_hard_regs_hash (const void *v)
181 const struct allocno_hard_regs *hv = (const struct allocno_hard_regs *) v;
183 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
186 /* Compares allocno hard registers V1 and V2. */
187 static int
188 allocno_hard_regs_eq (const void *v1, const void *v2)
190 const struct allocno_hard_regs *hv1 = (const struct allocno_hard_regs *) v1;
191 const struct allocno_hard_regs *hv2 = (const struct allocno_hard_regs *) v2;
193 return hard_reg_set_equal_p (hv1->set, hv2->set);
196 /* Hash table of unique allocno hard registers. */
197 static htab_t allocno_hard_regs_htab;
199 /* Return allocno hard registers in the hash table equal to HV. */
200 static allocno_hard_regs_t
201 find_hard_regs (allocno_hard_regs_t hv)
203 return (allocno_hard_regs_t) htab_find (allocno_hard_regs_htab, hv);
206 /* Insert allocno hard registers HV in the hash table (if it is not
207 there yet) and return the value which in the table. */
208 static allocno_hard_regs_t
209 insert_hard_regs (allocno_hard_regs_t hv)
211 PTR *slot = htab_find_slot (allocno_hard_regs_htab, hv, INSERT);
213 if (*slot == NULL)
214 *slot = hv;
215 return (allocno_hard_regs_t) *slot;
218 /* Initialize data concerning allocno hard registers. */
219 static void
220 init_allocno_hard_regs (void)
222 allocno_hard_regs_vec.create (200);
223 allocno_hard_regs_htab
224 = htab_create (200, allocno_hard_regs_hash, allocno_hard_regs_eq, NULL);
227 /* Add (or update info about) allocno hard registers with SET and
228 COST. */
229 static allocno_hard_regs_t
230 add_allocno_hard_regs (HARD_REG_SET set, HOST_WIDEST_INT cost)
232 struct allocno_hard_regs temp;
233 allocno_hard_regs_t hv;
235 gcc_assert (! hard_reg_set_empty_p (set));
236 COPY_HARD_REG_SET (temp.set, set);
237 if ((hv = find_hard_regs (&temp)) != NULL)
238 hv->cost += cost;
239 else
241 hv = ((struct allocno_hard_regs *)
242 ira_allocate (sizeof (struct allocno_hard_regs)));
243 COPY_HARD_REG_SET (hv->set, set);
244 hv->cost = cost;
245 allocno_hard_regs_vec.safe_push (hv);
246 insert_hard_regs (hv);
248 return hv;
251 /* Finalize data concerning allocno hard registers. */
252 static void
253 finish_allocno_hard_regs (void)
255 int i;
256 allocno_hard_regs_t hv;
258 for (i = 0;
259 allocno_hard_regs_vec.iterate (i, &hv);
260 i++)
261 ira_free (hv);
262 htab_delete (allocno_hard_regs_htab);
263 allocno_hard_regs_vec.release ();
266 /* Sort hard regs according to their frequency of usage. */
267 static int
268 allocno_hard_regs_compare (const void *v1p, const void *v2p)
270 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
271 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
273 if (hv2->cost > hv1->cost)
274 return 1;
275 else if (hv2->cost < hv1->cost)
276 return -1;
277 else
278 return 0;
283 /* Used for finding a common ancestor of two allocno hard registers
284 nodes in the forest. We use the current value of
285 'node_check_tick' to mark all nodes from one node to the top and
286 then walking up from another node until we find a marked node.
288 It is also used to figure out allocno colorability as a mark that
289 we already reset value of member 'conflict_size' for the forest
290 node corresponding to the processed allocno. */
291 static int node_check_tick;
293 /* Roots of the forest containing hard register sets can be assigned
294 to allocnos. */
295 static allocno_hard_regs_node_t hard_regs_roots;
297 /* Definition of vector of allocno hard register nodes. */
299 /* Vector used to create the forest. */
300 static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
302 /* Create and return allocno hard registers node containing allocno
303 hard registers HV. */
304 static allocno_hard_regs_node_t
305 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
307 allocno_hard_regs_node_t new_node;
309 new_node = ((struct allocno_hard_regs_node *)
310 ira_allocate (sizeof (struct allocno_hard_regs_node)));
311 new_node->check = 0;
312 new_node->hard_regs = hv;
313 new_node->hard_regs_num = hard_reg_set_size (hv->set);
314 new_node->first = NULL;
315 new_node->used_p = false;
316 return new_node;
319 /* Add allocno hard registers node NEW_NODE to the forest on its level
320 given by ROOTS. */
321 static void
322 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
323 allocno_hard_regs_node_t new_node)
325 new_node->next = *roots;
326 if (new_node->next != NULL)
327 new_node->next->prev = new_node;
328 new_node->prev = NULL;
329 *roots = new_node;
332 /* Add allocno hard registers HV (or its best approximation if it is
333 not possible) to the forest on its level given by ROOTS. */
334 static void
335 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
336 allocno_hard_regs_t hv)
338 unsigned int i, start;
339 allocno_hard_regs_node_t node, prev, new_node;
340 HARD_REG_SET temp_set;
341 allocno_hard_regs_t hv2;
343 start = hard_regs_node_vec.length ();
344 for (node = *roots; node != NULL; node = node->next)
346 if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
347 return;
348 if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
350 add_allocno_hard_regs_to_forest (&node->first, hv);
351 return;
353 if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
354 hard_regs_node_vec.safe_push (node);
355 else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
357 COPY_HARD_REG_SET (temp_set, hv->set);
358 AND_HARD_REG_SET (temp_set, node->hard_regs->set);
359 hv2 = add_allocno_hard_regs (temp_set, hv->cost);
360 add_allocno_hard_regs_to_forest (&node->first, hv2);
363 if (hard_regs_node_vec.length ()
364 > start + 1)
366 /* Create a new node which contains nodes in hard_regs_node_vec. */
367 CLEAR_HARD_REG_SET (temp_set);
368 for (i = start;
369 i < hard_regs_node_vec.length ();
370 i++)
372 node = hard_regs_node_vec[i];
373 IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
375 hv = add_allocno_hard_regs (temp_set, hv->cost);
376 new_node = create_new_allocno_hard_regs_node (hv);
377 prev = NULL;
378 for (i = start;
379 i < hard_regs_node_vec.length ();
380 i++)
382 node = hard_regs_node_vec[i];
383 if (node->prev == NULL)
384 *roots = node->next;
385 else
386 node->prev->next = node->next;
387 if (node->next != NULL)
388 node->next->prev = node->prev;
389 if (prev == NULL)
390 new_node->first = node;
391 else
392 prev->next = node;
393 node->prev = prev;
394 node->next = NULL;
395 prev = node;
397 add_new_allocno_hard_regs_node_to_forest (roots, new_node);
399 hard_regs_node_vec.truncate (start);
402 /* Add allocno hard registers nodes starting with the forest level
403 given by FIRST which contains biggest set inside SET. */
404 static void
405 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
406 HARD_REG_SET set)
408 allocno_hard_regs_node_t node;
410 ira_assert (first != NULL);
411 for (node = first; node != NULL; node = node->next)
412 if (hard_reg_set_subset_p (node->hard_regs->set, set))
413 hard_regs_node_vec.safe_push (node);
414 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
415 collect_allocno_hard_regs_cover (node->first, set);
418 /* Set up field parent as PARENT in all allocno hard registers nodes
419 in forest given by FIRST. */
420 static void
421 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
422 allocno_hard_regs_node_t parent)
424 allocno_hard_regs_node_t node;
426 for (node = first; node != NULL; node = node->next)
428 node->parent = parent;
429 setup_allocno_hard_regs_nodes_parent (node->first, node);
433 /* Return allocno hard registers node which is a first common ancestor
434 node of FIRST and SECOND in the forest. */
435 static allocno_hard_regs_node_t
436 first_common_ancestor_node (allocno_hard_regs_node_t first,
437 allocno_hard_regs_node_t second)
439 allocno_hard_regs_node_t node;
441 node_check_tick++;
442 for (node = first; node != NULL; node = node->parent)
443 node->check = node_check_tick;
444 for (node = second; node != NULL; node = node->parent)
445 if (node->check == node_check_tick)
446 return node;
447 return first_common_ancestor_node (second, first);
450 /* Print hard reg set SET to F. */
451 static void
452 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
454 int i, start;
456 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
458 if (TEST_HARD_REG_BIT (set, i))
460 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
461 start = i;
463 if (start >= 0
464 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
466 if (start == i - 1)
467 fprintf (f, " %d", start);
468 else if (start == i - 2)
469 fprintf (f, " %d %d", start, start + 1);
470 else
471 fprintf (f, " %d-%d", start, i - 1);
472 start = -1;
475 if (new_line_p)
476 fprintf (f, "\n");
479 /* Print allocno hard register subforest given by ROOTS and its LEVEL
480 to F. */
481 static void
482 print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
483 int level)
485 int i;
486 allocno_hard_regs_node_t node;
488 for (node = roots; node != NULL; node = node->next)
490 fprintf (f, " ");
491 for (i = 0; i < level * 2; i++)
492 fprintf (f, " ");
493 fprintf (f, "%d:(", node->preorder_num);
494 print_hard_reg_set (f, node->hard_regs->set, false);
495 fprintf (f, ")@" HOST_WIDEST_INT_PRINT_DEC "\n", node->hard_regs->cost);
496 print_hard_regs_subforest (f, node->first, level + 1);
500 /* Print the allocno hard register forest to F. */
501 static void
502 print_hard_regs_forest (FILE *f)
504 fprintf (f, " Hard reg set forest:\n");
505 print_hard_regs_subforest (f, hard_regs_roots, 1);
508 /* Print the allocno hard register forest to stderr. */
509 void
510 ira_debug_hard_regs_forest (void)
512 print_hard_regs_forest (stderr);
515 /* Remove unused allocno hard registers nodes from forest given by its
516 *ROOTS. */
517 static void
518 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
520 allocno_hard_regs_node_t node, prev, next, last;
522 for (prev = NULL, node = *roots; node != NULL; node = next)
524 next = node->next;
525 if (node->used_p)
527 remove_unused_allocno_hard_regs_nodes (&node->first);
528 prev = node;
530 else
532 for (last = node->first;
533 last != NULL && last->next != NULL;
534 last = last->next)
536 if (last != NULL)
538 if (prev == NULL)
539 *roots = node->first;
540 else
541 prev->next = node->first;
542 if (next != NULL)
543 next->prev = last;
544 last->next = next;
545 next = node->first;
547 else
549 if (prev == NULL)
550 *roots = next;
551 else
552 prev->next = next;
553 if (next != NULL)
554 next->prev = prev;
556 ira_free (node);
561 /* Set up fields preorder_num starting with START_NUM in all allocno
562 hard registers nodes in forest given by FIRST. Return biggest set
563 PREORDER_NUM increased by 1. */
564 static int
565 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
566 allocno_hard_regs_node_t parent,
567 int start_num)
569 allocno_hard_regs_node_t node;
571 for (node = first; node != NULL; node = node->next)
573 node->preorder_num = start_num++;
574 node->parent = parent;
575 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
576 start_num);
578 return start_num;
581 /* Number of allocno hard registers nodes in the forest. */
582 static int allocno_hard_regs_nodes_num;
584 /* Table preorder number of allocno hard registers node in the forest
585 -> the allocno hard registers node. */
586 static allocno_hard_regs_node_t *allocno_hard_regs_nodes;
588 /* See below. */
589 typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;
591 /* The structure is used to describes all subnodes (not only immediate
592 ones) in the mentioned above tree for given allocno hard register
593 node. The usage of such data accelerates calculation of
594 colorability of given allocno. */
595 struct allocno_hard_regs_subnode
597 /* The conflict size of conflicting allocnos whose hard register
598 sets are equal sets (plus supersets if given node is given
599 allocno hard registers node) of one in the given node. */
600 int left_conflict_size;
601 /* The summary conflict size of conflicting allocnos whose hard
602 register sets are strict subsets of one in the given node.
603 Overall conflict size is
604 left_conflict_subnodes_size
605 + MIN (max_node_impact - left_conflict_subnodes_size,
606 left_conflict_size)
608 short left_conflict_subnodes_size;
609 short max_node_impact;
612 /* Container for hard regs subnodes of all allocnos. */
613 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;
615 /* Table (preorder number of allocno hard registers node in the
616 forest, preorder number of allocno hard registers subnode) -> index
617 of the subnode relative to the node. -1 if it is not a
618 subnode. */
619 static int *allocno_hard_regs_subnode_index;
621 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
622 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
623 static void
624 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
626 allocno_hard_regs_node_t node, parent;
627 int index;
629 for (node = first; node != NULL; node = node->next)
631 allocno_hard_regs_nodes[node->preorder_num] = node;
632 for (parent = node; parent != NULL; parent = parent->parent)
634 index = parent->preorder_num * allocno_hard_regs_nodes_num;
635 allocno_hard_regs_subnode_index[index + node->preorder_num]
636 = node->preorder_num - parent->preorder_num;
638 setup_allocno_hard_regs_subnode_index (node->first);
642 /* Count all allocno hard registers nodes in tree ROOT. */
643 static int
644 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
646 int len = 1;
648 for (root = root->first; root != NULL; root = root->next)
649 len += get_allocno_hard_regs_subnodes_num (root);
650 return len;
653 /* Build the forest of allocno hard registers nodes and assign each
654 allocno a node from the forest. */
655 static void
656 form_allocno_hard_regs_nodes_forest (void)
658 unsigned int i, j, size, len;
659 int start;
660 ira_allocno_t a;
661 allocno_hard_regs_t hv;
662 bitmap_iterator bi;
663 HARD_REG_SET temp;
664 allocno_hard_regs_node_t node, allocno_hard_regs_node;
665 allocno_color_data_t allocno_data;
667 node_check_tick = 0;
668 init_allocno_hard_regs ();
669 hard_regs_roots = NULL;
670 hard_regs_node_vec.create (100);
671 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
672 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
674 CLEAR_HARD_REG_SET (temp);
675 SET_HARD_REG_BIT (temp, i);
676 hv = add_allocno_hard_regs (temp, 0);
677 node = create_new_allocno_hard_regs_node (hv);
678 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
680 start = allocno_hard_regs_vec.length ();
681 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
683 a = ira_allocnos[i];
684 allocno_data = ALLOCNO_COLOR_DATA (a);
686 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
687 continue;
688 hv = (add_allocno_hard_regs
689 (allocno_data->profitable_hard_regs,
690 ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
692 SET_HARD_REG_SET (temp);
693 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
694 add_allocno_hard_regs (temp, 0);
695 qsort (allocno_hard_regs_vec.address () + start,
696 allocno_hard_regs_vec.length () - start,
697 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
698 for (i = start;
699 allocno_hard_regs_vec.iterate (i, &hv);
700 i++)
702 add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
703 ira_assert (hard_regs_node_vec.length () == 0);
705 /* We need to set up parent fields for right work of
706 first_common_ancestor_node. */
707 setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
708 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
710 a = ira_allocnos[i];
711 allocno_data = ALLOCNO_COLOR_DATA (a);
712 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
713 continue;
714 hard_regs_node_vec.truncate (0);
715 collect_allocno_hard_regs_cover (hard_regs_roots,
716 allocno_data->profitable_hard_regs);
717 allocno_hard_regs_node = NULL;
718 for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
719 allocno_hard_regs_node
720 = (j == 0
721 ? node
722 : first_common_ancestor_node (node, allocno_hard_regs_node));
723 /* That is a temporary storage. */
724 allocno_hard_regs_node->used_p = true;
725 allocno_data->hard_regs_node = allocno_hard_regs_node;
727 ira_assert (hard_regs_roots->next == NULL);
728 hard_regs_roots->used_p = true;
729 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
730 allocno_hard_regs_nodes_num
731 = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
732 allocno_hard_regs_nodes
733 = ((allocno_hard_regs_node_t *)
734 ira_allocate (allocno_hard_regs_nodes_num
735 * sizeof (allocno_hard_regs_node_t)));
736 size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
737 allocno_hard_regs_subnode_index
738 = (int *) ira_allocate (size * sizeof (int));
739 for (i = 0; i < size; i++)
740 allocno_hard_regs_subnode_index[i] = -1;
741 setup_allocno_hard_regs_subnode_index (hard_regs_roots);
742 start = 0;
743 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
745 a = ira_allocnos[i];
746 allocno_data = ALLOCNO_COLOR_DATA (a);
747 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
748 continue;
749 len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
750 allocno_data->hard_regs_subnodes_start = start;
751 allocno_data->hard_regs_subnodes_num = len;
752 start += len;
754 allocno_hard_regs_subnodes
755 = ((allocno_hard_regs_subnode_t)
756 ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
757 hard_regs_node_vec.release ();
760 /* Free tree of allocno hard registers nodes given by its ROOT. */
761 static void
762 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
764 allocno_hard_regs_node_t child, next;
766 for (child = root->first; child != NULL; child = next)
768 next = child->next;
769 finish_allocno_hard_regs_nodes_tree (child);
771 ira_free (root);
774 /* Finish work with the forest of allocno hard registers nodes. */
775 static void
776 finish_allocno_hard_regs_nodes_forest (void)
778 allocno_hard_regs_node_t node, next;
780 ira_free (allocno_hard_regs_subnodes);
781 for (node = hard_regs_roots; node != NULL; node = next)
783 next = node->next;
784 finish_allocno_hard_regs_nodes_tree (node);
786 ira_free (allocno_hard_regs_nodes);
787 ira_free (allocno_hard_regs_subnode_index);
788 finish_allocno_hard_regs ();
791 /* Set up left conflict sizes and left conflict subnodes sizes of hard
792 registers subnodes of allocno A. Return TRUE if allocno A is
793 trivially colorable. */
794 static bool
795 setup_left_conflict_sizes_p (ira_allocno_t a)
797 int i, k, nobj, start;
798 int conflict_size, left_conflict_subnodes_size, node_preorder_num;
799 allocno_color_data_t data;
800 HARD_REG_SET profitable_hard_regs;
801 allocno_hard_regs_subnode_t subnodes;
802 allocno_hard_regs_node_t node;
803 HARD_REG_SET node_set;
805 nobj = ALLOCNO_NUM_OBJECTS (a);
806 conflict_size = 0;
807 data = ALLOCNO_COLOR_DATA (a);
808 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
809 COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
810 node = data->hard_regs_node;
811 node_preorder_num = node->preorder_num;
812 COPY_HARD_REG_SET (node_set, node->hard_regs->set);
813 node_check_tick++;
814 for (k = 0; k < nobj; k++)
816 ira_object_t obj = ALLOCNO_OBJECT (a, k);
817 ira_object_t conflict_obj;
818 ira_object_conflict_iterator oci;
820 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
822 int size;
823 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
824 allocno_hard_regs_node_t conflict_node, temp_node;
825 HARD_REG_SET conflict_node_set;
826 allocno_color_data_t conflict_data;
828 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
829 if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
830 || ! hard_reg_set_intersect_p (profitable_hard_regs,
831 conflict_data
832 ->profitable_hard_regs))
833 continue;
834 conflict_node = conflict_data->hard_regs_node;
835 COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
836 if (hard_reg_set_subset_p (node_set, conflict_node_set))
837 temp_node = node;
838 else
840 ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
841 temp_node = conflict_node;
843 if (temp_node->check != node_check_tick)
845 temp_node->check = node_check_tick;
846 temp_node->conflict_size = 0;
848 size = (ira_reg_class_max_nregs
849 [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
850 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
851 /* We will deal with the subwords individually. */
852 size = 1;
853 temp_node->conflict_size += size;
856 for (i = 0; i < data->hard_regs_subnodes_num; i++)
858 allocno_hard_regs_node_t temp_node;
860 temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
861 ira_assert (temp_node->preorder_num == i + node_preorder_num);
862 subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
863 ? 0 : temp_node->conflict_size);
864 if (hard_reg_set_subset_p (temp_node->hard_regs->set,
865 profitable_hard_regs))
866 subnodes[i].max_node_impact = temp_node->hard_regs_num;
867 else
869 HARD_REG_SET temp_set;
870 int j, n, hard_regno;
871 enum reg_class aclass;
873 COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
874 AND_HARD_REG_SET (temp_set, profitable_hard_regs);
875 aclass = ALLOCNO_CLASS (a);
876 for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
878 hard_regno = ira_class_hard_regs[aclass][j];
879 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
880 n++;
882 subnodes[i].max_node_impact = n;
884 subnodes[i].left_conflict_subnodes_size = 0;
886 start = node_preorder_num * allocno_hard_regs_nodes_num;
887 for (i = data->hard_regs_subnodes_num - 1; i >= 0; i--)
889 int size, parent_i;
890 allocno_hard_regs_node_t parent;
892 size = (subnodes[i].left_conflict_subnodes_size
893 + MIN (subnodes[i].max_node_impact
894 - subnodes[i].left_conflict_subnodes_size,
895 subnodes[i].left_conflict_size));
896 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
897 if (parent == NULL)
898 continue;
899 parent_i
900 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
901 if (parent_i < 0)
902 continue;
903 subnodes[parent_i].left_conflict_subnodes_size += size;
905 left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
906 conflict_size
907 += (left_conflict_subnodes_size
908 + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
909 subnodes[0].left_conflict_size));
910 conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
911 data->colorable_p = conflict_size <= data->available_regs_num;
912 return data->colorable_p;
915 /* Update left conflict sizes of hard registers subnodes of allocno A
916 after removing allocno REMOVED_A with SIZE from the conflict graph.
917 Return TRUE if A is trivially colorable. */
918 static bool
919 update_left_conflict_sizes_p (ira_allocno_t a,
920 ira_allocno_t removed_a, int size)
922 int i, conflict_size, before_conflict_size, diff, start;
923 int node_preorder_num, parent_i;
924 allocno_hard_regs_node_t node, removed_node, parent;
925 allocno_hard_regs_subnode_t subnodes;
926 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
928 ira_assert (! data->colorable_p);
929 node = data->hard_regs_node;
930 node_preorder_num = node->preorder_num;
931 removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
932 ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
933 node->hard_regs->set)
934 || hard_reg_set_subset_p (node->hard_regs->set,
935 removed_node->hard_regs->set));
936 start = node_preorder_num * allocno_hard_regs_nodes_num;
937 i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
938 if (i < 0)
939 i = 0;
940 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
941 before_conflict_size
942 = (subnodes[i].left_conflict_subnodes_size
943 + MIN (subnodes[i].max_node_impact
944 - subnodes[i].left_conflict_subnodes_size,
945 subnodes[i].left_conflict_size));
946 subnodes[i].left_conflict_size -= size;
947 for (;;)
949 conflict_size
950 = (subnodes[i].left_conflict_subnodes_size
951 + MIN (subnodes[i].max_node_impact
952 - subnodes[i].left_conflict_subnodes_size,
953 subnodes[i].left_conflict_size));
954 if ((diff = before_conflict_size - conflict_size) == 0)
955 break;
956 ira_assert (conflict_size < before_conflict_size);
957 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
958 if (parent == NULL)
959 break;
960 parent_i
961 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
962 if (parent_i < 0)
963 break;
964 i = parent_i;
965 before_conflict_size
966 = (subnodes[i].left_conflict_subnodes_size
967 + MIN (subnodes[i].max_node_impact
968 - subnodes[i].left_conflict_subnodes_size,
969 subnodes[i].left_conflict_size));
970 subnodes[i].left_conflict_subnodes_size -= diff;
972 if (i != 0
973 || (conflict_size
974 + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
975 > data->available_regs_num))
976 return false;
977 data->colorable_p = true;
978 return true;
981 /* Return true if allocno A has empty profitable hard regs. */
982 static bool
983 empty_profitable_hard_regs (ira_allocno_t a)
985 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
987 return hard_reg_set_empty_p (data->profitable_hard_regs);
990 /* Set up profitable hard registers for each allocno being
991 colored. */
992 static void
993 setup_profitable_hard_regs (void)
995 unsigned int i;
996 int j, k, nobj, hard_regno, nregs, class_size;
997 ira_allocno_t a;
998 bitmap_iterator bi;
999 enum reg_class aclass;
1000 enum machine_mode mode;
1001 allocno_color_data_t data;
/* Pass 1: seed each coloring allocno's profitable_hard_regs from its
   class/mode and subtract hard regs its objects explicitly conflict
   with.  */
1003 /* Initial set up from allocno classes and explicitly conflicting
1004 hard regs. */
1005 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1007 a = ira_allocnos[i];
1008 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
1009 continue;
1010 data = ALLOCNO_COLOR_DATA (a);
/* No per-register costs and memory is cheaper than the class cost:
   no hard register is profitable at all.  */
1011 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
1012 && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a))
1013 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1014 else
1016 mode = ALLOCNO_MODE (a);
1017 COPY_HARD_REG_SET (data->profitable_hard_regs,
1018 ira_useful_class_mode_regs[aclass][mode]);
1019 nobj = ALLOCNO_NUM_OBJECTS (a);
1020 for (k = 0; k < nobj; k++)
1022 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1024 AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
1025 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
/* Pass 2: remove hard regs already handed out to conflicting
   allocnos from the profitable sets of their conflict partners.  */
1029 /* Exclude hard regs already assigned for conflicting objects. */
1030 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
1032 a = ira_allocnos[i];
1033 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1034 || ! ALLOCNO_ASSIGNED_P (a)
1035 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1036 continue;
1037 mode = ALLOCNO_MODE (a);
1038 nregs = hard_regno_nregs[hard_regno][mode];
1039 nobj = ALLOCNO_NUM_OBJECTS (a);
1040 for (k = 0; k < nobj; k++)
1042 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1043 ira_object_t conflict_obj;
1044 ira_object_conflict_iterator oci;
1046 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1048 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1050 /* We can process the conflict allocno repeatedly with
1051 the same result. */
/* One object per word: clear only the word's register, honoring
   the target's ordering of words within a multi-register value.  */
1052 if (nregs == nobj && nregs > 1)
1054 int num = OBJECT_SUBWORD (conflict_obj);
1056 if (REG_WORDS_BIG_ENDIAN)
1057 CLEAR_HARD_REG_BIT
1058 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1059 hard_regno + nobj - num - 1);
1060 else
1061 CLEAR_HARD_REG_BIT
1062 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1063 hard_regno + num);
1065 else
1066 AND_COMPL_HARD_REG_SET
1067 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1068 ira_reg_mode_hard_regset[hard_regno][mode]);
/* Pass 3: drop hard regs that cost more than spilling to memory, and
   tighten the updated class cost to the cheapest surviving reg.  */
1072 /* Exclude too costly hard regs. */
1073 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1075 int min_cost = INT_MAX;
1076 int *costs;
1078 a = ira_allocnos[i];
1079 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1080 || empty_profitable_hard_regs (a))
1081 continue;
1082 data = ALLOCNO_COLOR_DATA (a);
1083 mode = ALLOCNO_MODE (a);
1084 if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
1085 || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
1087 class_size = ira_class_hard_regs_num[aclass];
1088 for (j = 0; j < class_size; j++)
1090 hard_regno = ira_class_hard_regs[aclass][j];
1091 if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
1092 hard_regno))
1093 continue;
/* Register costlier than memory: not profitable.  */
1094 if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j])
1095 CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
1096 hard_regno);
1097 else if (min_cost > costs[j])
1098 min_cost = costs[j];
1101 else if (ALLOCNO_UPDATED_MEMORY_COST (a)
1102 < ALLOCNO_UPDATED_CLASS_COST (a))
1103 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1104 if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
1105 ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
1111 /* This page contains functions used to choose hard registers for
1112 allocnos. */
1114 /* Array whose element value is TRUE if the corresponding hard
1115 register was already allocated for an allocno. */
1116 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
1118 /* Describes one element in a queue of allocnos whose costs need to be
1119 updated. Each allocno in the queue is known to have an allocno
1120 class. */
1121 struct update_cost_queue_elem
1123 /* This element is in the queue iff CHECK == update_cost_check. */
1124 int check;
1126 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1127 connecting this allocno to the one being allocated. */
1128 int divisor;
1130 /* The next allocno in the queue, or null if this is the last element. */
1131 ira_allocno_t next;
1134 /* The first element in a queue of allocnos whose copy costs need to be
1135 updated. Null if the queue is empty. */
1136 static ira_allocno_t update_cost_queue;
1138 /* The last element in the queue described by update_cost_queue.
1139 Not valid if update_cost_queue is null. */
1140 static struct update_cost_queue_elem *update_cost_queue_tail;
1142 /* A pool of elements in the queue described by update_cost_queue.
1143 Elements are indexed by ALLOCNO_NUM. */
1144 static struct update_cost_queue_elem *update_cost_queue_elems;
/* Generation counter for cost-update passes; bumped by
   start_update_cost.  A pool element is considered queued iff its
   CHECK field equals this value, which lets a new pass invalidate
   the whole pool in O(1).  */
1147 static int update_cost_check;
1149 /* Allocate and initialize data necessary for function
1150 update_copy_costs. */
1151 static void
1152 initiate_cost_update (void)
1154 size_t size;
1156 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1157 update_cost_queue_elems
1158 = (struct update_cost_queue_elem *) ira_allocate (size);
1159 memset (update_cost_queue_elems, 0, size);
1160 update_cost_check = 0;
1163 /* Deallocate data used by function update_copy_costs. */
1164 static void
1165 finish_cost_update (void)
1167 ira_free (update_cost_queue_elems);
1170 /* When we traverse allocnos to update hard register costs, the cost
1171 divisor will be multiplied by the following macro value for each
1172 hop from given allocno to directly connected allocnos. */
/* I.e. an allocno N copy-hops away contributes 1/4**N of its cost.  */
1173 #define COST_HOP_DIVISOR 4
1175 /* Start a new cost-updating pass. */
1176 static void
1177 start_update_cost (void)
1179 update_cost_check++;
1180 update_cost_queue = NULL;
1183 /* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue, unless
1184 ALLOCNO is already in the queue, or has NO_REGS class. */
1185 static inline void
1186 queue_update_cost (ira_allocno_t allocno, int divisor)
1188 struct update_cost_queue_elem *elem;
1190 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1191 if (elem->check != update_cost_check
1192 && ALLOCNO_CLASS (allocno) != NO_REGS)
1194 elem->check = update_cost_check;
1195 elem->divisor = divisor;
1196 elem->next = NULL;
1197 if (update_cost_queue == NULL)
1198 update_cost_queue = allocno;
1199 else
1200 update_cost_queue_tail->next = allocno;
1201 update_cost_queue_tail = elem;
1205 /* Try to remove the first element from update_cost_queue. Return false
1206 if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
1207 the removed element. */
1208 static inline bool
1209 get_next_update_cost (ira_allocno_t *allocno, int *divisor)
1211 struct update_cost_queue_elem *elem;
1213 if (update_cost_queue == NULL)
1214 return false;
1216 *allocno = update_cost_queue;
1217 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1218 *divisor = elem->divisor;
1219 update_cost_queue = elem->next;
1220 return true;
1223 /* Update the cost of allocnos to increase chances to remove some
1224 copies as the result of subsequent assignment. */
/* ALLOCNO has just been given a hard register; propagate a bias
   toward (or, if DECR_P, away from) that register through the graph
   of copies, attenuated by COST_HOP_DIVISOR per hop (breadth-first
   via the update-cost queue).  */
1225 static void
1226 update_copy_costs (ira_allocno_t allocno, bool decr_p)
1228 int i, cost, update_cost, hard_regno, divisor;
1229 enum machine_mode mode;
1230 enum reg_class rclass, aclass;
1231 ira_allocno_t another_allocno;
1232 ira_copy_t cp, next_cp;
1234 hard_regno = ALLOCNO_HARD_REGNO (allocno);
1235 ira_assert (hard_regno >= 0);
1237 aclass = ALLOCNO_CLASS (allocno);
1238 if (aclass == NO_REGS)
1239 return;
1240 i = ira_class_hard_reg_index[aclass][hard_regno];
1241 ira_assert (i >= 0);
1242 rclass = REGNO_REG_CLASS (hard_regno);
1244 start_update_cost ();
1245 divisor = 1;
/* Body of a do { ... } while (get_next_update_cost ...) loop: process
   ALLOCNO first, then every allocno dequeued from the cost queue.  */
1248 mode = ALLOCNO_MODE (allocno);
1249 ira_init_register_move_cost_if_necessary (mode);
1250 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
/* Walk the copy list; ALLOCNO may be either end of each copy.  */
1252 if (cp->first == allocno)
1254 next_cp = cp->next_first_allocno_copy;
1255 another_allocno = cp->second;
1257 else if (cp->second == allocno)
1259 next_cp = cp->next_second_allocno_copy;
1260 another_allocno = cp->first;
1262 else
1263 gcc_unreachable ();
1265 aclass = ALLOCNO_CLASS (another_allocno);
1266 if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
1267 hard_regno)
1268 || ALLOCNO_ASSIGNED_P (another_allocno))
1269 continue;
/* Direction of the move determines which move-cost table entry
   applies.  */
1271 cost = (cp->second == allocno
1272 ? ira_register_move_cost[mode][rclass][aclass]
1273 : ira_register_move_cost[mode][aclass][rclass]);
1274 if (decr_p)
1275 cost = -cost;
1277 update_cost = cp->freq * cost / divisor;
1278 if (update_cost == 0)
1279 continue;
1281 ira_allocate_and_set_or_copy_costs
1282 (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), aclass,
1283 ALLOCNO_UPDATED_CLASS_COST (another_allocno),
1284 ALLOCNO_HARD_REG_COSTS (another_allocno));
1285 ira_allocate_and_set_or_copy_costs
1286 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
1287 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
1288 i = ira_class_hard_reg_index[aclass][hard_regno];
1289 if (i < 0)
1290 continue;
1291 ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
1292 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
1293 += update_cost;
/* Queue the neighbor so the bias keeps spreading, diluted by one
   more hop.  */
1295 queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
1298 while (get_next_update_cost (&allocno, &divisor));
1301 /* This function updates COSTS (decrease if DECR_P) for hard_registers
1302 of ACLASS by conflict costs of the unassigned allocnos
1303 connected by copies with allocnos in update_cost_queue. This
1304 update increases chances to remove some copies. */
/* Drains the queue seeded by the caller (see assign_hard_reg), doing
   a breadth-first walk over copies with the contribution divided by
   DIVISOR (another COST_HOP_DIVISOR factor per hop, capped at 4
   hops).  */
1305 static void
1306 update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
1307 bool decr_p)
1309 int i, cost, class_size, freq, mult, div, divisor;
1310 int index, hard_regno;
1311 int *conflict_costs;
1312 bool cont_p;
1313 enum reg_class another_aclass;
1314 ira_allocno_t allocno, another_allocno;
1315 ira_copy_t cp, next_cp;
1317 while (get_next_update_cost (&allocno, &divisor))
1318 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1320 if (cp->first == allocno)
1322 next_cp = cp->next_first_allocno_copy;
1323 another_allocno = cp->second;
1325 else if (cp->second == allocno)
1327 next_cp = cp->next_second_allocno_copy;
1328 another_allocno = cp->first;
1330 else
1331 gcc_unreachable ();
1332 another_aclass = ALLOCNO_CLASS (another_allocno);
/* Only unassigned, non-spill-candidate allocnos of an
   intersecting class can still influence the choice.  */
1333 if (! ira_reg_classes_intersect_p[aclass][another_aclass]
1334 || ALLOCNO_ASSIGNED_P (another_allocno)
1335 || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
1336 continue;
1337 class_size = ira_class_hard_regs_num[another_aclass];
1338 ira_allocate_and_copy_costs
1339 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
1340 another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
1341 conflict_costs
1342 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
/* NULL conflict costs mean "all zero": nothing to fold in, but
   keep walking through this allocno (cont_p stays true).  */
1343 if (conflict_costs == NULL)
1344 cont_p = true;
1345 else
1347 mult = cp->freq;
1348 freq = ALLOCNO_FREQ (another_allocno);
1349 if (freq == 0)
1350 freq = 1;
1351 div = freq * divisor;
1352 cont_p = false;
1353 for (i = class_size - 1; i >= 0; i--)
1355 hard_regno = ira_class_hard_regs[another_aclass][i];
1356 ira_assert (hard_regno >= 0);
1357 index = ira_class_hard_reg_index[aclass][hard_regno];
1358 if (index < 0)
1359 continue;
1360 cost = conflict_costs [i] * mult / div;
1361 if (cost == 0)
1362 continue;
/* At least one non-zero contribution: worth continuing the
   walk past this allocno.  */
1363 cont_p = true;
1364 if (decr_p)
1365 cost = -cost;
1366 costs[index] += cost;
1369 /* Probably 5 hops will be enough. */
1370 if (cont_p
1371 && divisor <= (COST_HOP_DIVISOR
1372 * COST_HOP_DIVISOR
1373 * COST_HOP_DIVISOR
1374 * COST_HOP_DIVISOR))
1375 queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
1379 /* Set up conflicting (through CONFLICT_REGS) for each object of
1380 allocno A and the start allocno profitable regs (through
1381 START_PROFITABLE_REGS). Remember that the start profitable regs
1382 exclude hard regs which can not hold value of mode of allocno A.
1383 This covers mostly cases when multi-register value should be
1384 aligned. */
1385 static inline void
1386 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1387 HARD_REG_SET *conflict_regs,
1388 HARD_REG_SET *start_profitable_regs)
1390 int i, nwords;
1391 ira_object_t obj;
1393 nwords = ALLOCNO_NUM_OBJECTS (a);
1394 for (i = 0; i < nwords; i++)
1396 obj = ALLOCNO_OBJECT (a, i);
1397 COPY_HARD_REG_SET (conflict_regs[i],
1398 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1400 if (retry_p)
1402 COPY_HARD_REG_SET (*start_profitable_regs,
1403 reg_class_contents[ALLOCNO_CLASS (a)]);
1404 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1405 ira_prohibited_class_mode_regs
1406 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1408 else
1409 COPY_HARD_REG_SET (*start_profitable_regs,
1410 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
1413 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1414 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
/* Rejects the register if it is prohibited for A's class/mode, not in
   PROFITABLE_REGS, or if any register of the (HARD_REGNO, MODE) span
   collides with the relevant object's conflict set.  */
1415 static inline bool
1416 check_hard_reg_p (ira_allocno_t a, int hard_regno,
1417 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1419 int j, nwords, nregs;
1420 enum reg_class aclass;
1421 enum machine_mode mode;
1423 aclass = ALLOCNO_CLASS (a);
1424 mode = ALLOCNO_MODE (a);
1425 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
1426 hard_regno))
1427 return false;
1428 /* Checking only profitable hard regs. */
1429 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1430 return false;
1431 nregs = hard_regno_nregs[hard_regno][mode];
1432 nwords = ALLOCNO_NUM_OBJECTS (a);
1433 for (j = 0; j < nregs; j++)
1435 int k;
/* By default test register j against every object's conflicts;
   when there is one object per word, narrow the test to the one
   object covering register j (word order depends on target
   endianness).  */
1436 int set_to_test_start = 0, set_to_test_end = nwords;
1438 if (nregs == nwords)
1440 if (REG_WORDS_BIG_ENDIAN)
1441 set_to_test_start = nwords - j - 1;
1442 else
1443 set_to_test_start = j;
1444 set_to_test_end = set_to_test_start + 1;
1446 for (k = set_to_test_start; k < set_to_test_end; k++)
1447 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
1448 break;
/* Inner break means a conflict was found; abandon the outer loop
   so that j != nregs signals failure below.  */
1449 if (k != set_to_test_end)
1450 break;
1452 return j == nregs;
1454 #ifndef HONOR_REG_ALLOC_ORDER
1456 /* Return number of registers needed to be saved and restored at
1457 function prologue/epilogue if we allocate HARD_REGNO to hold value
1458 of MODE. */
1459 static int
1460 calculate_saved_nregs (int hard_regno, enum machine_mode mode)
1462 int i;
1463 int nregs = 0;
1465 ira_assert (hard_regno >= 0);
1466 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1467 if (!allocated_hardreg_p[hard_regno + i]
1468 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1469 && !LOCAL_REGNO (hard_regno + i))
1470 nregs++;
1471 return nregs;
1473 #endif
1475 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1476 that the function called from function
1477 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1478 this case some allocno data are not defined or updated and we
1479 should not touch these data. The function returns true if we
1480 managed to assign a hard register to the allocno.
1482 To assign a hard register, first of all we calculate all conflict
1483 hard registers which can come from conflicting allocnos with
1484 already assigned hard registers. After that we find first free
1485 hard register with the minimal cost. During hard register cost
1486 calculation we take conflict hard register costs into account to
1487 give a chance for conflicting allocnos to get a better hard
1488 register in the future.
1490 If the best hard register cost is bigger than cost of memory usage
1491 for the allocno, we don't assign a hard register to given allocno
1492 at all.
1494 If we assign a hard register to the allocno, we update costs of the
1495 hard register for allocnos connected by copies to improve a chance
1496 to coalesce insns represented by the copies when we assign hard
1497 registers to the allocnos connected by the copies. */
1498 static bool
1499 assign_hard_reg (ira_allocno_t a, bool retry_p)
1501 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1502 int i, j, hard_regno, best_hard_regno, class_size;
1503 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1504 int *a_costs;
1505 enum reg_class aclass;
1506 enum machine_mode mode;
1507 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
1508 #ifndef HONOR_REG_ALLOC_ORDER
1509 int saved_nregs;
1510 enum reg_class rclass;
1511 int add_cost;
1512 #endif
1513 #ifdef STACK_REGS
1514 bool no_stack_reg_p;
1515 #endif
1517 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1518 get_conflict_and_start_profitable_regs (a, retry_p,
1519 conflicting_regs,
1520 &profitable_hard_regs);
1521 aclass = ALLOCNO_CLASS (a);
1522 class_size = ira_class_hard_regs_num[aclass];
1523 best_hard_regno = -1;
/* NOTE(review): full_costs is zeroed both here and again at line 1527
   below; the first memset is redundant and could be removed.  */
1524 memset (full_costs, 0, sizeof (int) * class_size);
1525 mem_cost = 0;
1526 memset (costs, 0, sizeof (int) * class_size);
1527 memset (full_costs, 0, sizeof (int) * class_size);
1528 #ifdef STACK_REGS
1529 no_stack_reg_p = false;
1530 #endif
1531 if (! retry_p)
1532 start_update_cost ();
1533 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1535 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1536 aclass, ALLOCNO_HARD_REG_COSTS (a));
1537 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
1538 #ifdef STACK_REGS
1539 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
1540 #endif
/* Seed COSTS/FULL_COSTS either from the per-register cost vector or,
   when it is absent, uniformly from the class cost.  */
1541 cost = ALLOCNO_UPDATED_CLASS_COST (a);
1542 for (i = 0; i < class_size; i++)
1543 if (a_costs != NULL)
1545 costs[i] += a_costs[i];
1546 full_costs[i] += a_costs[i];
1548 else
1550 costs[i] += cost;
1551 full_costs[i] += cost;
1553 nwords = ALLOCNO_NUM_OBJECTS (a);
1554 curr_allocno_process++;
1555 for (word = 0; word < nwords; word++)
1557 ira_object_t conflict_obj;
1558 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1559 ira_object_conflict_iterator oci;
1561 /* Take preferences of conflicting allocnos into account. */
1562 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1564 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1565 enum reg_class conflict_aclass;
1567 /* Reload can give another class so we need to check all
1568 allocnos. */
1569 if (!retry_p
1570 && (!bitmap_bit_p (consideration_allocno_bitmap,
1571 ALLOCNO_NUM (conflict_a))
1572 || ((!ALLOCNO_ASSIGNED_P (conflict_a)
1573 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1574 && !(hard_reg_set_intersect_p
1575 (profitable_hard_regs,
1576 ALLOCNO_COLOR_DATA
1577 (conflict_a)->profitable_hard_regs)))))
1578 continue;
1579 conflict_aclass = ALLOCNO_CLASS (conflict_a);
1580 ira_assert (ira_reg_classes_intersect_p
1581 [aclass][conflict_aclass]);
/* An already-assigned conflict removes hard regs from
   consideration outright ...  */
1582 if (ALLOCNO_ASSIGNED_P (conflict_a))
1584 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1585 if (hard_regno >= 0
1586 && (ira_hard_reg_set_intersection_p
1587 (hard_regno, ALLOCNO_MODE (conflict_a),
1588 reg_class_contents[aclass])))
1590 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
1591 int conflict_nregs;
1593 mode = ALLOCNO_MODE (conflict_a);
1594 conflict_nregs = hard_regno_nregs[hard_regno][mode];
/* One object per word: exclude only the word's register,
   respecting target word ordering.  */
1595 if (conflict_nregs == n_objects && conflict_nregs > 1)
1597 int num = OBJECT_SUBWORD (conflict_obj);
1599 if (REG_WORDS_BIG_ENDIAN)
1600 SET_HARD_REG_BIT (conflicting_regs[word],
1601 hard_regno + n_objects - num - 1);
1602 else
1603 SET_HARD_REG_BIT (conflicting_regs[word],
1604 hard_regno + num);
1606 else
1607 IOR_HARD_REG_SET
1608 (conflicting_regs[word],
1609 ira_reg_mode_hard_regset[hard_regno][mode]);
/* Nothing profitable is left free: give up early.  */
1610 if (hard_reg_set_subset_p (profitable_hard_regs,
1611 conflicting_regs[word]))
1612 goto fail;
/* ... while an unassigned, unspillable conflict only biases the
   costs away from its preferred registers.  */
1615 else if (! retry_p
1616 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1617 /* Don't process the conflict allocno twice. */
1618 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1619 != curr_allocno_process)
1621 int k, *conflict_costs;
1623 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1624 = curr_allocno_process;
1625 ira_allocate_and_copy_costs
1626 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1627 conflict_aclass,
1628 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1629 conflict_costs
1630 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1631 if (conflict_costs != NULL)
1632 for (j = class_size - 1; j >= 0; j--)
1634 hard_regno = ira_class_hard_regs[aclass][j];
1635 ira_assert (hard_regno >= 0);
1636 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
1637 if (k < 0)
1638 continue;
1639 full_costs[j] -= conflict_costs[k];
1641 queue_update_cost (conflict_a, COST_HOP_DIVISOR);
1645 if (! retry_p)
1646 /* Take into account preferences of allocnos connected by copies to
1647 the conflict allocnos. */
1648 update_conflict_hard_regno_costs (full_costs, aclass, true);
1650 /* Take preferences of allocnos connected by copies into
1651 account. */
1652 if (! retry_p)
1654 start_update_cost ();
1655 queue_update_cost (a, COST_HOP_DIVISOR);
1656 update_conflict_hard_regno_costs (full_costs, aclass, false);
1658 min_cost = min_full_cost = INT_MAX;
1659 /* We don't care about giving callee saved registers to allocnos no
1660 living through calls because call clobbered registers are
1661 allocated first (it is usual practice to put them first in
1662 REG_ALLOC_ORDER). */
1663 mode = ALLOCNO_MODE (a);
/* Scan the class in REG_ALLOC_ORDER, keeping the register with the
   smallest full (preference-adjusted) cost.  */
1664 for (i = 0; i < class_size; i++)
1666 hard_regno = ira_class_hard_regs[aclass][i];
1667 #ifdef STACK_REGS
1668 if (no_stack_reg_p
1669 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1670 continue;
1671 #endif
1672 if (! check_hard_reg_p (a, hard_regno,
1673 conflicting_regs, profitable_hard_regs))
1674 continue;
1675 cost = costs[i];
1676 full_cost = full_costs[i];
1677 #ifndef HONOR_REG_ALLOC_ORDER
1678 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1679 /* We need to save/restore the hard register in
1680 epilogue/prologue. Therefore we increase the cost. */
1682 rclass = REGNO_REG_CLASS (hard_regno);
1683 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1684 + ira_memory_move_cost[mode][rclass][1])
1685 * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
1686 cost += add_cost;
1687 full_cost += add_cost;
1689 #endif
1690 if (min_cost > cost)
1691 min_cost = cost;
1692 if (min_full_cost > full_cost)
1694 min_full_cost = full_cost;
1695 best_hard_regno = hard_regno;
1696 ira_assert (hard_regno >= 0);
/* Spilling to memory beats the best register: assign nothing.  */
1699 if (min_full_cost > mem_cost)
1701 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1702 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1703 mem_cost, min_full_cost);
1704 best_hard_regno = -1;
1706 fail:
1707 if (best_hard_regno >= 0)
1709 for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
1710 allocated_hardreg_p[best_hard_regno + i] = true;
/* Record the decision (possibly "no register") and propagate the
   new bias through copies.  */
1712 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1713 ALLOCNO_ASSIGNED_P (a) = true;
1714 if (best_hard_regno >= 0)
1715 update_copy_costs (a, true);
1716 ira_assert (ALLOCNO_CLASS (a) == aclass);
1717 /* We don't need updated costs anymore: */
1718 ira_free_allocno_updated_costs (a);
1719 return best_hard_regno >= 0;
1724 /* This page contains the allocator based on the Chaitin-Briggs algorithm. */
/* Bucket of allocnos that can be colored currently without
   spilling.  */
1727 static ira_allocno_t colorable_allocno_bucket;
1729 /* Bucket of allocnos that might be not colored currently without
1730 spilling. */
1731 static ira_allocno_t uncolorable_allocno_bucket;
1733 /* The current number of allocnos in the uncolorable_bucket. */
1734 static int uncolorable_allocnos_num;
1736 /* Return the current spill priority of allocno A. The less the
1737 number, the more preferable the allocno for spilling. */
1738 static inline int
1739 allocno_spill_priority (ira_allocno_t a)
1741 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1743 return (data->temp
1744 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
1745 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
1746 + 1));
1749 /* Add allocno A to bucket *BUCKET_PTR. A should be not in a bucket
1750 before the call. */
1751 static void
1752 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
1754 ira_allocno_t first_a;
1755 allocno_color_data_t data;
1757 if (bucket_ptr == &uncolorable_allocno_bucket
1758 && ALLOCNO_CLASS (a) != NO_REGS)
1760 uncolorable_allocnos_num++;
1761 ira_assert (uncolorable_allocnos_num > 0);
1763 first_a = *bucket_ptr;
1764 data = ALLOCNO_COLOR_DATA (a);
1765 data->next_bucket_allocno = first_a;
1766 data->prev_bucket_allocno = NULL;
1767 if (first_a != NULL)
1768 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
1769 *bucket_ptr = a;
1772 /* Compare two allocnos to define which allocno should be pushed first
1773 into the coloring stack. If the return is a negative number, the
1774 allocno given by the first parameter will be pushed first. In this
1775 case such allocno has less priority than the second one and the
1776 hard register will be assigned to it after assignment to the second
1777 one. As the result of such assignment order, the second allocno
1778 has a better chance to get the best hard register. */
1779 static int
1780 bucket_allocno_compare_func (const void *v1p, const void *v2p)
1782 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
1783 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
1784 int diff, a1_freq, a2_freq, a1_num, a2_num;
1785 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
1787 /* Push pseudos requiring less hard registers first. It means that
1788 we will assign pseudos requiring more hard registers first
1789 avoiding creation small holes in free hard register file into
1790 which the pseudos requiring more hard registers can not fit. */
1791 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
1792 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
1793 return diff;
1794 a1_freq = ALLOCNO_FREQ (a1);
1795 a2_freq = ALLOCNO_FREQ (a2);
1796 if ((diff = a1_freq - a2_freq) != 0)
1797 return diff;
1798 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
1799 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
1800 if ((diff = a2_num - a1_num) != 0)
1801 return diff;
1802 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
1805 /* Sort bucket *BUCKET_PTR and return the result through
1806 BUCKET_PTR. */
1807 static void
1808 sort_bucket (ira_allocno_t *bucket_ptr,
1809 int (*compare_func) (const void *, const void *))
1811 ira_allocno_t a, head;
1812 int n;
1814 for (n = 0, a = *bucket_ptr;
1815 a != NULL;
1816 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
1817 sorted_allocnos[n++] = a;
1818 if (n <= 1)
1819 return;
1820 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
1821 head = NULL;
1822 for (n--; n >= 0; n--)
1824 a = sorted_allocnos[n];
1825 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
1826 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
1827 if (head != NULL)
1828 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
1829 head = a;
1831 *bucket_ptr = head;
1834 /* Add ALLOCNO to bucket *BUCKET_PTR maintaining the order according
1835 their priority. ALLOCNO should be not in a bucket before the
1836 call. */
/* Linear insertion keyed by bucket_allocno_compare_func; the list
   stays sorted in ascending compare order.  */
1837 static void
1838 add_allocno_to_ordered_bucket (ira_allocno_t allocno,
1839 ira_allocno_t *bucket_ptr)
1841 ira_allocno_t before, after;
/* Keep the uncolorable-bucket population counter in sync.  */
1843 if (bucket_ptr == &uncolorable_allocno_bucket
1844 && ALLOCNO_CLASS (allocno) != NO_REGS)
1846 uncolorable_allocnos_num++;
1847 ira_assert (uncolorable_allocnos_num > 0);
/* Find the first element BEFORE that ALLOCNO should precede; AFTER
   trails one element behind.  */
1849 for (before = *bucket_ptr, after = NULL;
1850 before != NULL;
1851 after = before,
1852 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
1853 if (bucket_allocno_compare_func (&allocno, &before) < 0)
1854 break;
/* Splice ALLOCNO in between AFTER and BEFORE.  */
1855 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
1856 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
1857 if (after == NULL)
1858 *bucket_ptr = allocno;
1859 else
1860 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
1861 if (before != NULL)
1862 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
1865 /* Delete ALLOCNO from bucket *BUCKET_PTR. It should be there before
1866 the call. */
1867 static void
1868 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
1870 ira_allocno_t prev_allocno, next_allocno;
1872 if (bucket_ptr == &uncolorable_allocno_bucket
1873 && ALLOCNO_CLASS (allocno) != NO_REGS)
1875 uncolorable_allocnos_num--;
1876 ira_assert (uncolorable_allocnos_num >= 0);
1878 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
1879 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
1880 if (prev_allocno != NULL)
1881 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
1882 else
1884 ira_assert (*bucket_ptr == allocno);
1885 *bucket_ptr = next_allocno;
1887 if (next_allocno != NULL)
1888 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
1891 /* Put allocno A onto the coloring stack without removing it from its
1892 bucket. Pushing allocno to the coloring stack can result in moving
1893 conflicting allocnos from the uncolorable bucket to the colorable
1894 one. */
1895 static void
1896 push_allocno_to_stack (ira_allocno_t a)
1898 enum reg_class aclass;
1899 allocno_color_data_t data, conflict_data;
1900 int size, i, n = ALLOCNO_NUM_OBJECTS (a);
1902 data = ALLOCNO_COLOR_DATA (a);
1903 data->in_graph_p = false;
1904 allocno_stack_vec.safe_push (a);
1905 aclass = ALLOCNO_CLASS (a);
1906 if (aclass == NO_REGS)
1907 return;
1908 size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
1909 if (n > 1)
1911 /* We will deal with the subwords individually. */
1912 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
1913 size = 1;
/* Removing A relieves pressure on its neighbors; any conflict that
   becomes colorable moves to the colorable bucket.  */
1915 for (i = 0; i < n; i++)
1917 ira_object_t obj = ALLOCNO_OBJECT (a, i);
1918 ira_object_t conflict_obj;
1919 ira_object_conflict_iterator oci;
1921 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1923 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1925 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
/* Skip conflicts that are already colorable, already pushed,
   already assigned, or compete for no common profitable reg.  */
1926 if (conflict_data->colorable_p
1927 || ! conflict_data->in_graph_p
1928 || ALLOCNO_ASSIGNED_P (conflict_a)
1929 || !(hard_reg_set_intersect_p
1930 (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
1931 conflict_data->profitable_hard_regs)))
1932 continue;
1933 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
1934 ALLOCNO_NUM (conflict_a)));
1935 if (update_left_conflict_sizes_p (conflict_a, a, size))
1937 delete_allocno_from_bucket
1938 (conflict_a, &uncolorable_allocno_bucket);
1939 add_allocno_to_ordered_bucket
1940 (conflict_a, &colorable_allocno_bucket);
1941 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
1943 fprintf (ira_dump_file, " Making");
1944 ira_print_expanded_allocno (conflict_a);
1945 fprintf (ira_dump_file, " colorable\n");
1953 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
1954 The allocno is in the colorable bucket if COLORABLE_P is TRUE. */
/* An allocno popped from the uncolorable bucket is a potential spill
   candidate, so it is flagged may_be_spilled_p before pushing.  */
1955 static void
1956 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
1958 if (colorable_p)
1959 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
1960 else
1961 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
1962 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1964 fprintf (ira_dump_file, " Pushing");
1965 ira_print_expanded_allocno (allocno);
1966 if (colorable_p)
1967 fprintf (ira_dump_file, "(cost %d)\n",
1968 ALLOCNO_COLOR_DATA (allocno)->temp)
1969 else
1970 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
1971 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
1972 allocno_spill_priority (allocno),
1973 ALLOCNO_COLOR_DATA (allocno)->temp);
1975 if (! colorable_p)
1976 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
1977 push_allocno_to_stack (allocno);
1980 /* Put all allocnos from colorable bucket onto the coloring stack. */
1981 static void
1982 push_only_colorable (void)
1984 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
1985 for (;colorable_allocno_bucket != NULL;)
1986 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
1989 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
1990 loop given by its LOOP_NODE. */
1992 ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
1994 int freq, i;
1995 edge_iterator ei;
1996 edge e;
1997 vec<edge> edges;
1999 ira_assert (current_loops != NULL && loop_node->loop != NULL
2000 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
2001 freq = 0;
2002 if (! exit_p)
2004 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2005 if (e->src != loop_node->loop->latch
2006 && (regno < 0
2007 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2008 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
2009 freq += EDGE_FREQUENCY (e);
2011 else
2013 edges = get_loop_exit_edges (loop_node->loop);
2014 FOR_EACH_VEC_ELT (edges, i, e)
2015 if (regno < 0
2016 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2017 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
2018 freq += EDGE_FREQUENCY (e);
2019 edges.release ();
2022 return REG_FREQ_FROM_EDGE_FREQ (freq);
2025 /* Calculate and return the cost of putting allocno A into memory. */
2026 static int
2027 calculate_allocno_spill_cost (ira_allocno_t a)
2029 int regno, cost;
2030 enum machine_mode mode;
2031 enum reg_class rclass;
2032 ira_allocno_t parent_allocno;
2033 ira_loop_tree_node_t parent_node, loop_node;
2035 regno = ALLOCNO_REGNO (a);
2036 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
2037 if (ALLOCNO_CAP (a) != NULL)
2038 return cost;
2039 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2040 if ((parent_node = loop_node->parent) == NULL)
2041 return cost;
2042 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
2043 return cost;
2044 mode = ALLOCNO_MODE (a);
2045 rclass = ALLOCNO_CLASS (a);
2046 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
2047 cost -= (ira_memory_move_cost[mode][rclass][0]
2048 * ira_loop_edge_freq (loop_node, regno, true)
2049 + ira_memory_move_cost[mode][rclass][1]
2050 * ira_loop_edge_freq (loop_node, regno, false));
2051 else
2053 ira_init_register_move_cost_if_necessary (mode);
2054 cost += ((ira_memory_move_cost[mode][rclass][1]
2055 * ira_loop_edge_freq (loop_node, regno, true)
2056 + ira_memory_move_cost[mode][rclass][0]
2057 * ira_loop_edge_freq (loop_node, regno, false))
2058 - (ira_register_move_cost[mode][rclass][rclass]
2059 * (ira_loop_edge_freq (loop_node, regno, false)
2060 + ira_loop_edge_freq (loop_node, regno, true))));
2062 return cost;
2065 /* Used for sorting allocnos for spilling. */
2066 static inline int
2067 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2069 int pri1, pri2, diff;
2071 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2072 return 1;
2073 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2074 return -1;
2075 pri1 = allocno_spill_priority (a1);
2076 pri2 = allocno_spill_priority (a2);
2077 if ((diff = pri1 - pri2) != 0)
2078 return diff;
2079 if ((diff
2080 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2081 return diff;
2082 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2085 /* Used for sorting allocnos for spilling. */
2086 static int
2087 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2089 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2090 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2092 return allocno_spill_priority_compare (p1, p2);
2095 /* Push allocnos to the coloring stack. The order of allocnos in the
2096 stack defines the order for the subsequent coloring. */
2097 static void
2098 push_allocnos_to_stack (void)
2100 ira_allocno_t a;
2101 int cost;
2103 /* Calculate uncolorable allocno spill costs. */
2104 for (a = uncolorable_allocno_bucket;
2105 a != NULL;
2106 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2107 if (ALLOCNO_CLASS (a) != NO_REGS)
2109 cost = calculate_allocno_spill_cost (a);
2110 /* ??? Remove cost of copies between the coalesced
2111 allocnos. */
2112 ALLOCNO_COLOR_DATA (a)->temp = cost;
2114 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
2115 for (;;)
2117 push_only_colorable ();
2118 a = uncolorable_allocno_bucket;
2119 if (a == NULL)
2120 break;
2121 remove_allocno_from_bucket_and_push (a, false);
2123 ira_assert (colorable_allocno_bucket == NULL
2124 && uncolorable_allocno_bucket == NULL);
2125 ira_assert (uncolorable_allocnos_num == 0);
2128 /* Pop the coloring stack and assign hard registers to the popped
2129 allocnos. */
2130 static void
2131 pop_allocnos_from_stack (void)
2133 ira_allocno_t allocno;
2134 enum reg_class aclass;
2136 for (;allocno_stack_vec.length () != 0;)
2138 allocno = allocno_stack_vec.pop ();
2139 aclass = ALLOCNO_CLASS (allocno);
2140 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2142 fprintf (ira_dump_file, " Popping");
2143 ira_print_expanded_allocno (allocno);
2144 fprintf (ira_dump_file, " -- ");
2146 if (aclass == NO_REGS)
2148 ALLOCNO_HARD_REGNO (allocno) = -1;
2149 ALLOCNO_ASSIGNED_P (allocno) = true;
2150 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2151 ira_assert
2152 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2153 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2154 fprintf (ira_dump_file, "assign memory\n");
2156 else if (assign_hard_reg (allocno, false))
2158 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2159 fprintf (ira_dump_file, "assign reg %d\n",
2160 ALLOCNO_HARD_REGNO (allocno));
2162 else if (ALLOCNO_ASSIGNED_P (allocno))
2164 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2165 fprintf (ira_dump_file, "spill\n");
2167 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2171 /* Set up number of available hard registers for allocno A. */
2172 static void
2173 setup_allocno_available_regs_num (ira_allocno_t a)
2175 int i, n, hard_regno, hard_regs_num, nwords;
2176 enum reg_class aclass;
2177 allocno_color_data_t data;
2179 aclass = ALLOCNO_CLASS (a);
2180 data = ALLOCNO_COLOR_DATA (a);
2181 data->available_regs_num = 0;
2182 if (aclass == NO_REGS)
2183 return;
2184 hard_regs_num = ira_class_hard_regs_num[aclass];
2185 nwords = ALLOCNO_NUM_OBJECTS (a);
2186 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
2188 hard_regno = ira_class_hard_regs[aclass][i];
2189 /* Checking only profitable hard regs. */
2190 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
2191 n++;
2193 data->available_regs_num = n;
2194 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
2195 return;
2196 fprintf
2197 (ira_dump_file,
2198 " Allocno a%dr%d of %s(%d) has %d avail. regs ",
2199 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2200 reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
2201 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2202 fprintf (ira_dump_file, ", %snode: ",
2203 hard_reg_set_equal_p (data->profitable_hard_regs,
2204 data->hard_regs_node->hard_regs->set)
2205 ? "" : "^");
2206 print_hard_reg_set (ira_dump_file,
2207 data->hard_regs_node->hard_regs->set, false);
2208 for (i = 0; i < nwords; i++)
2210 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2212 if (nwords != 1)
2214 if (i != 0)
2215 fprintf (ira_dump_file, ", ");
2216 fprintf (ira_dump_file, " obj %d", i);
2218 fprintf (ira_dump_file, " (confl regs = ");
2219 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2220 false);
2221 fprintf (ira_dump_file, ")");
2223 fprintf (ira_dump_file, "\n");
2226 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2227 conflicting allocnos and hard registers. */
2228 static void
2229 put_allocno_into_bucket (ira_allocno_t allocno)
2231 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2232 setup_allocno_available_regs_num (allocno);
2233 if (setup_left_conflict_sizes_p (allocno))
2234 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2235 else
2236 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
2239 /* Map: allocno number -> allocno priority. */
2240 static int *allocno_priorities;
2242 /* Set up priorities for N allocnos in array
2243 CONSIDERATION_ALLOCNOS. */
2244 static void
2245 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
2247 int i, length, nrefs, priority, max_priority, mult;
2248 ira_allocno_t a;
2250 max_priority = 0;
2251 for (i = 0; i < n; i++)
2253 a = consideration_allocnos[i];
2254 nrefs = ALLOCNO_NREFS (a);
2255 ira_assert (nrefs >= 0);
2256 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2257 ira_assert (mult >= 0);
2258 allocno_priorities[ALLOCNO_NUM (a)]
2259 = priority
2260 = (mult
2261 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
2262 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
2263 if (priority < 0)
2264 priority = -priority;
2265 if (max_priority < priority)
2266 max_priority = priority;
2268 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2269 for (i = 0; i < n; i++)
2271 a = consideration_allocnos[i];
2272 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2273 if (ALLOCNO_NUM_OBJECTS (a) > 1)
2274 length /= ALLOCNO_NUM_OBJECTS (a);
2275 if (length <= 0)
2276 length = 1;
2277 allocno_priorities[ALLOCNO_NUM (a)]
2278 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2282 /* Sort allocnos according to the profit of usage of a hard register
2283 instead of memory for them. */
2284 static int
2285 allocno_cost_compare_func (const void *v1p, const void *v2p)
2287 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2288 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2289 int c1, c2;
2291 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2292 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2293 if (c1 - c2)
2294 return c1 - c2;
2296 /* If regs are equally good, sort by allocno numbers, so that the
2297 results of qsort leave nothing to chance. */
2298 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2301 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2302 possible to hard registers. Let us try to improve allocation with
2303 cost point of view. This function improves the allocation by
2304 spilling some allocnos and assigning the freed hard registers to
2305 other allocnos if it decreases the overall allocation cost. */
2306 static void
2307 improve_allocation (void)
2309 unsigned int i;
2310 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
2311 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
2312 bool try_p;
2313 enum reg_class aclass;
2314 enum machine_mode mode;
2315 int *allocno_costs;
2316 int costs[FIRST_PSEUDO_REGISTER];
2317 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
2318 ira_allocno_t a;
2319 bitmap_iterator bi;
2321 /* Clear counts used to process conflicting allocnos only once for
2322 each allocno. */
2323 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2324 ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
/* CHECK stamps conflicting allocnos so each is costed once per
   candidate allocno; N counts allocnos spilled below and collected in
   SORTED_ALLOCNOS.  */
2325 check = n = 0;
2326 /* Process each allocno and try to assign a hard register to it by
2327 spilling some its conflicting allocnos. */
2328 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2330 a = ira_allocnos[i];
2331 ALLOCNO_COLOR_DATA (a)->temp = 0;
2332 if (empty_profitable_hard_regs (a))
2333 continue;
2334 check++;
2335 aclass = ALLOCNO_CLASS (a);
2336 allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
2337 if (allocno_costs == NULL)
2338 allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
/* BASE_COST is what the current disposition of A costs; candidate
   costs below are computed relative to it.  */
2339 if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
2340 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
2341 else if (allocno_costs == NULL)
2342 /* It means that assigning a hard register is not profitable
2343 (we don't waste memory for hard register costs in this
2344 case). */
2345 continue;
2346 else
2347 base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
2348 try_p = false;
2349 get_conflict_and_start_profitable_regs (a, false,
2350 conflicting_regs,
2351 &profitable_hard_regs);
2352 class_size = ira_class_hard_regs_num[aclass];
2353 /* Set up cost improvement for usage of each profitable hard
2354 register for allocno A. */
2355 for (j = 0; j < class_size; j++)
2357 hregno = ira_class_hard_regs[aclass][j];
2358 if (! check_hard_reg_p (a, hregno,
2359 conflicting_regs, profitable_hard_regs))
2360 continue;
2361 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
2362 k = allocno_costs == NULL ? 0 : j;
2363 costs[hregno] = (allocno_costs == NULL
2364 ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
2365 costs[hregno] -= base_cost;
/* A negative entry means this hard register would be cheaper than
   A's current disposition, before spill penalties are added.  */
2366 if (costs[hregno] < 0)
2367 try_p = true;
2369 if (! try_p)
2370 /* There is no chance to improve the allocation cost by
2371 assigning hard register to allocno A even without spilling
2372 conflicting allocnos. */
2373 continue;
2374 mode = ALLOCNO_MODE (a);
2375 nwords = ALLOCNO_NUM_OBJECTS (a);
2376 /* Process each allocno conflicting with A and update the cost
2377 improvement for profitable hard registers of A. To use a
2378 hard register for A we need to spill some conflicting
2379 allocnos and that creates penalty for the cost
2380 improvement. */
2381 for (word = 0; word < nwords; word++)
2383 ira_object_t conflict_obj;
2384 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2385 ira_object_conflict_iterator oci;
2387 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2389 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2391 if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
2392 /* We already processed this conflicting allocno
2393 because we processed earlier another object of the
2394 conflicting allocno. */
2395 continue;
2396 ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
2397 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2398 continue;
/* SPILL_COST is the penalty of evicting CONFLICT_A from its current
   hard register into memory.  */
2399 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
2400 k = (ira_class_hard_reg_index
2401 [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
2402 ira_assert (k >= 0);
2403 if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
2404 != NULL)
2405 spill_cost -= allocno_costs[k];
2406 else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
2407 != NULL)
2408 spill_cost -= allocno_costs[k];
2409 else
2410 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
2411 conflict_nregs
2412 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
/* Charge the spill penalty to every candidate start register R whose
   register span for MODE would overlap CONFLICT_A's registers.  */
2413 for (r = conflict_hregno;
2414 r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
2415 r--)
2416 if (check_hard_reg_p (a, r,
2417 conflicting_regs, profitable_hard_regs))
2418 costs[r] += spill_cost;
2419 for (r = conflict_hregno + 1;
2420 r < conflict_hregno + conflict_nregs;
2421 r++)
2422 if (check_hard_reg_p (a, r,
2423 conflicting_regs, profitable_hard_regs))
2424 costs[r] += spill_cost;
2427 min_cost = INT_MAX;
2428 best = -1;
2429 /* Now we choose hard register for A which results in highest
2430 allocation cost improvement. */
2431 for (j = 0; j < class_size; j++)
2433 hregno = ira_class_hard_regs[aclass][j];
2434 if (check_hard_reg_p (a, hregno,
2435 conflicting_regs, profitable_hard_regs)
2436 && min_cost > costs[hregno])
2438 best = hregno;
2439 min_cost = costs[hregno];
2442 if (min_cost >= 0)
2443 /* We are in a situation when assigning any hard register to A
2444 by spilling some conflicting allocnos does not improve the
2445 allocation cost. */
2446 continue;
2447 nregs = hard_regno_nregs[best][mode];
2448 /* Now spill conflicting allocnos which contain a hard register
2449 of A when we assign the best chosen hard register to it. */
2450 for (word = 0; word < nwords; word++)
2452 ira_object_t conflict_obj;
2453 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2454 ira_object_conflict_iterator oci;
2456 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2458 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2460 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2461 continue;
2462 conflict_nregs
2463 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2464 if (best + nregs <= conflict_hregno
2465 || conflict_hregno + conflict_nregs <= best)
2466 /* No intersection. */
2467 continue;
2468 ALLOCNO_HARD_REGNO (conflict_a) = -1;
2469 sorted_allocnos[n++] = conflict_a;
2470 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2471 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
2472 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
2473 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2476 /* Assign the best chosen hard register to A. */
2477 ALLOCNO_HARD_REGNO (a) = best;
2478 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2479 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
2480 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2482 if (n == 0)
2483 return;
2484 /* We spilled some allocnos to assign their hard registers to other
2485 allocnos. The spilled allocnos are now in array
2486 'sorted_allocnos'. There is still a possibility that some of the
2487 spilled allocnos can get hard registers. So let us try assign
2488 them hard registers again (just a reminder -- function
2489 'assign_hard_reg' assigns hard registers only if it is possible
2490 and profitable). We process the spilled allocnos with biggest
2491 benefit to get hard register first -- see function
2492 'allocno_cost_compare_func'. */
2493 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2494 allocno_cost_compare_func);
2495 for (j = 0; j < n; j++)
2497 a = sorted_allocnos[j];
2498 ALLOCNO_ASSIGNED_P (a) = false;
2499 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2501 fprintf (ira_dump_file, " ");
2502 ira_print_expanded_allocno (a);
2503 fprintf (ira_dump_file, " -- ");
2505 if (assign_hard_reg (a, false))
2507 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2508 fprintf (ira_dump_file, "assign hard reg %d\n",
2509 ALLOCNO_HARD_REGNO (a));
2511 else
2513 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2514 fprintf (ira_dump_file, "assign memory\n");
2519 /* Sort allocnos according to their priorities which are calculated
2520 analogous to ones in file `global.c'. */
2521 static int
2522 allocno_priority_compare_func (const void *v1p, const void *v2p)
2524 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2525 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2526 int pri1, pri2;
2528 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2529 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2530 if (pri2 != pri1)
2531 return SORTGT (pri2, pri1);
2533 /* If regs are equally good, sort by allocnos, so that the results of
2534 qsort leave nothing to chance. */
2535 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2538 /* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
2539 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
2540 static void
2541 color_allocnos (void)
2543 unsigned int i, n;
2544 bitmap_iterator bi;
2545 ira_allocno_t a;
2547 setup_profitable_hard_regs ();
/* Priority algorithm: spill NO_REGS allocnos immediately, then
   assign the remaining allocnos in decreasing priority order.  */
2548 if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
2550 n = 0;
2551 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2553 a = ira_allocnos[i];
2554 if (ALLOCNO_CLASS (a) == NO_REGS)
2556 ALLOCNO_HARD_REGNO (a) = -1;
2557 ALLOCNO_ASSIGNED_P (a) = true;
2558 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2559 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2560 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2562 fprintf (ira_dump_file, " Spill");
2563 ira_print_expanded_allocno (a);
2564 fprintf (ira_dump_file, "\n");
2566 continue;
2568 sorted_allocnos[n++] = a;
2570 if (n != 0)
2572 setup_allocno_priorities (sorted_allocnos, n);
2573 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2574 allocno_priority_compare_func);
2575 for (i = 0; i < n; i++)
2577 a = sorted_allocnos[i];
2578 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2580 fprintf (ira_dump_file, " ");
2581 ira_print_expanded_allocno (a);
2582 fprintf (ira_dump_file, " -- ");
2584 if (assign_hard_reg (a, false))
2586 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2587 fprintf (ira_dump_file, "assign hard reg %d\n",
2588 ALLOCNO_HARD_REGNO (a));
2590 else
2592 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2593 fprintf (ira_dump_file, "assign memory\n");
/* Chaitin-Briggs algorithm: build the hard-regs forest, put each
   allocno that has a chance of a register into the conflict graph,
   spill the rest up front, then do bucket/stack coloring and a final
   cost-improvement pass.  */
2598 else
2600 form_allocno_hard_regs_nodes_forest ();
2601 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2602 print_hard_regs_forest (ira_dump_file);
2603 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2605 a = ira_allocnos[i];
2606 if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
2607 ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
2608 else
2610 ALLOCNO_HARD_REGNO (a) = -1;
2611 ALLOCNO_ASSIGNED_P (a) = true;
2612 /* We don't need updated costs anymore. */
2613 ira_free_allocno_updated_costs (a);
2614 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2616 fprintf (ira_dump_file, " Spill");
2617 ira_print_expanded_allocno (a);
2618 fprintf (ira_dump_file, "\n");
2622 /* Put the allocnos into the corresponding buckets. */
2623 colorable_allocno_bucket = NULL;
2624 uncolorable_allocno_bucket = NULL;
2625 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2627 a = ira_allocnos[i];
2628 if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
2629 put_allocno_into_bucket (a);
2631 push_allocnos_to_stack ();
2632 pop_allocnos_from_stack ();
2633 finish_allocno_hard_regs_nodes_forest ();
/* Runs for both algorithms: try to lower the overall cost by
   re-shuffling assignments.  */
2635 improve_allocation ();
2640 /* Output information about the loop given by its LOOP_TREE_NODE. */
2641 static void
2642 print_loop_title (ira_loop_tree_node_t loop_tree_node)
2644 unsigned int j;
2645 bitmap_iterator bi;
2646 ira_loop_tree_node_t subloop_node, dest_loop_node;
2647 edge e;
2648 edge_iterator ei;
/* The root node represents the whole function; it has no struct loop
   behind it, hence the fixed header line.  */
2650 if (loop_tree_node->parent == NULL)
2651 fprintf (ira_dump_file,
2652 "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
2653 NUM_FIXED_BLOCKS)
2654 else
2656 ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
2657 fprintf (ira_dump_file,
2658 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
2659 loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
2660 loop_tree_node->loop->header->index,
2661 loop_depth (loop_tree_node->loop));
/* List the member basic blocks, annotating edges that leave this
   region with their destination block and loop.  */
2663 for (subloop_node = loop_tree_node->children;
2664 subloop_node != NULL;
2665 subloop_node = subloop_node->next)
2666 if (subloop_node->bb != NULL)
2668 fprintf (ira_dump_file, " %d", subloop_node->bb->index);
2669 FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
2670 if (e->dest != EXIT_BLOCK_PTR
2671 && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
2672 != loop_tree_node))
2673 fprintf (ira_dump_file, "(->%d:l%d)",
2674 e->dest->index, dest_loop_node->loop_num);
2676 fprintf (ira_dump_file, "\n all:");
2677 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
2678 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
2679 fprintf (ira_dump_file, "\n modified regnos:");
2680 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
2681 fprintf (ira_dump_file, " %d", j);
2682 fprintf (ira_dump_file, "\n border:");
2683 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
2684 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
2685 fprintf (ira_dump_file, "\n Pressure:");
/* Dump only the pressure classes with non-zero register pressure.  */
2686 for (j = 0; (int) j < ira_pressure_classes_num; j++)
2688 enum reg_class pclass;
2690 pclass = ira_pressure_classes[j];
2691 if (loop_tree_node->reg_pressure[pclass] == 0)
2692 continue;
2693 fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
2694 loop_tree_node->reg_pressure[pclass]);
2696 fprintf (ira_dump_file, "\n");
2699 /* Color the allocnos inside loop (in the extreme case it can be all
2700 of the function) given the corresponding LOOP_TREE_NODE. The
2701 function is called for each loop during top-down traverse of the
2702 loop tree. */
2703 static void
2704 color_pass (ira_loop_tree_node_t loop_tree_node)
2706 int regno, hard_regno, index = -1, n;
2707 int cost, exit_freq, enter_freq;
2708 unsigned int j;
2709 bitmap_iterator bi;
2710 enum machine_mode mode;
2711 enum reg_class rclass, aclass, pclass;
2712 ira_allocno_t a, subloop_allocno;
2713 ira_loop_tree_node_t subloop_node;
2715 ira_assert (loop_tree_node->bb == NULL);
2716 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2717 print_loop_title (loop_tree_node);
/* COLORING_ALLOCNO_BITMAP: region allocnos still to color;
   CONSIDERATION_ALLOCNO_BITMAP: all region allocnos (including ones
   already assigned by an outer region).  */
2719 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
2720 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
2721 n = 0;
2722 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2724 a = ira_allocnos[j];
2725 n++;
2726 if (! ALLOCNO_ASSIGNED_P (a))
2727 continue;
2728 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
/* Attach per-pass color data to every consideration allocno.  */
2730 allocno_color_data
2731 = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
2732 * n);
2733 memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
2734 curr_allocno_process = 0;
2735 n = 0;
2736 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2738 a = ira_allocnos[j];
2739 ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
2740 n++;
2742 /* Color all mentioned allocnos including transparent ones. */
2743 color_allocnos ();
2744 /* Process caps. They are processed just once. */
2745 if (flag_ira_region == IRA_REGION_MIXED
2746 || flag_ira_region == IRA_REGION_ALL)
2747 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
2749 a = ira_allocnos[j];
2750 if (ALLOCNO_CAP_MEMBER (a) == NULL)
2751 continue;
2752 /* Remove from processing in the next loop. */
2753 bitmap_clear_bit (consideration_allocno_bitmap, j);
2754 rclass = ALLOCNO_CLASS (a);
2755 pclass = ira_pressure_class_translate[rclass];
/* With low register pressure in the subloop, propagate the cap's
   assignment directly to the capped allocno.  */
2756 if (flag_ira_region == IRA_REGION_MIXED
2757 && (loop_tree_node->reg_pressure[pclass]
2758 <= ira_class_hard_regs_num[pclass]))
2760 mode = ALLOCNO_MODE (a);
2761 hard_regno = ALLOCNO_HARD_REGNO (a);
2762 if (hard_regno >= 0)
2764 index = ira_class_hard_reg_index[rclass][hard_regno];
2765 ira_assert (index >= 0);
2767 regno = ALLOCNO_REGNO (a);
2768 subloop_allocno = ALLOCNO_CAP_MEMBER (a);
2769 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
2770 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
2771 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2772 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2773 if (hard_regno >= 0)
2774 update_copy_costs (subloop_allocno, true);
2775 /* We don't need updated costs anymore: */
2776 ira_free_allocno_updated_costs (subloop_allocno);
2779 /* Update costs of the corresponding allocnos (not caps) in the
2780 subloops. */
2781 for (subloop_node = loop_tree_node->subloops;
2782 subloop_node != NULL;
2783 subloop_node = subloop_node->subloop_next)
2785 ira_assert (subloop_node->bb == NULL);
2786 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2788 a = ira_allocnos[j];
2789 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2790 mode = ALLOCNO_MODE (a);
2791 rclass = ALLOCNO_CLASS (a);
2792 pclass = ira_pressure_class_translate[rclass];
2793 hard_regno = ALLOCNO_HARD_REGNO (a);
2794 /* Use hard register class here. ??? */
2795 if (hard_regno >= 0)
2797 index = ira_class_hard_reg_index[rclass][hard_regno];
2798 ira_assert (index >= 0);
2800 regno = ALLOCNO_REGNO (a);
2801 /* ??? conflict costs */
2802 subloop_allocno = subloop_node->regno_allocno_map[regno];
2803 if (subloop_allocno == NULL
2804 || ALLOCNO_CAP (subloop_allocno) != NULL)
2805 continue;
2806 ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
2807 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
2808 ALLOCNO_NUM (subloop_allocno)));
/* Low-pressure subloop under the mixed strategy: simply inherit the
   parent's assignment.  */
2809 if ((flag_ira_region == IRA_REGION_MIXED)
2810 && (loop_tree_node->reg_pressure[pclass]
2811 <= ira_class_hard_regs_num[pclass]))
2813 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
2815 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2816 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2817 if (hard_regno >= 0)
2818 update_copy_costs (subloop_allocno, true);
2819 /* We don't need updated costs anymore: */
2820 ira_free_allocno_updated_costs (subloop_allocno);
2822 continue;
2824 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
2825 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
2826 ira_assert (regno < ira_reg_equiv_len);
/* A pseudo with an equivalence we must not modify also inherits the
   parent's assignment unconditionally.  */
2827 if (ira_equiv_no_lvalue_p (regno))
2829 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
2831 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2832 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2833 if (hard_regno >= 0)
2834 update_copy_costs (subloop_allocno, true);
2835 /* We don't need updated costs anymore: */
2836 ira_free_allocno_updated_costs (subloop_allocno);
/* Parent allocno is in memory: keeping the subloop allocno in memory
   too saves the loop-border memory moves.  */
2839 else if (hard_regno < 0)
2841 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
2842 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
2843 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
/* Parent allocno got a hard register: bias the subloop allocno's
   updated costs toward the same register and penalize memory.  */
2845 else
2847 aclass = ALLOCNO_CLASS (subloop_allocno);
2848 ira_init_register_move_cost_if_necessary (mode);
2849 cost = (ira_register_move_cost[mode][rclass][rclass]
2850 * (exit_freq + enter_freq));
2851 ira_allocate_and_set_or_copy_costs
2852 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
2853 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
2854 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
2855 ira_allocate_and_set_or_copy_costs
2856 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
2857 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
2858 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
2859 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
2860 -= cost;
2861 if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
2862 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
2863 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
2864 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
2865 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
2866 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
2867 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
/* Release the per-pass color data for this region.  */
2871 ira_free (allocno_color_data);
2872 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
2874 a = ira_allocnos[j];
2875 ALLOCNO_ADD_DATA (a) = NULL;
2879 /* Initialize the common data for coloring and calls functions to do
2880 Chaitin-Briggs and regional coloring. */
2881 static void
2882 do_coloring (void)
2884 coloring_allocno_bitmap = ira_allocate_bitmap ();
2885 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2886 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
2888 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
2890 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2891 ira_print_disposition (ira_dump_file);
2893 ira_free_bitmap (coloring_allocno_bitmap);
/* Move spill/restore code, which is to be generated in ira-emit.c,
   to less frequent points (if it is profitable) by reassigning some
   allocnos (in a loop with subloops containing the loop in another
   loop) to memory, which results in a longer live range where the
   corresponding pseudo-registers will be in memory.  Iterates until
   no more profitable moves are found.  */
static void
move_spill_restore (void)
{
  int cost, regno, hard_regno, hard_regno2, index;
  bool changed_p;
  int enter_freq, exit_freq;
  enum machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t a, parent_allocno, subloop_allocno;
  ira_loop_tree_node_t parent, loop_node, subloop_node;
  ira_allocno_iterator ai;

  for (;;)
    {
      changed_p = false;
      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "New iteration of spill/restore move\n");
      FOR_EACH_ALLOCNO (a, ai)
	{
	  regno = ALLOCNO_REGNO (a);
	  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
	  /* Only consider non-cap allocnos that got a hard register,
	     live on the region border, in a loop that has subloops.  */
	  if (ALLOCNO_CAP_MEMBER (a) != NULL
	      || ALLOCNO_CAP (a) != NULL
	      || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
	      || loop_node->children == NULL
	      /* don't do the optimization because it can create
		 copies and the reload pass can spill the allocno set
		 by copy although the allocno will not get memory
		 slot.  */
	      || ira_equiv_no_lvalue_p (regno)
	      || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
	    continue;
	  mode = ALLOCNO_MODE (a);
	  rclass = ALLOCNO_CLASS (a);
	  index = ira_class_hard_reg_index[rclass][hard_regno];
	  ira_assert (index >= 0);
	  /* COST starts as the benefit of keeping A in its hard
	     register in this loop (memory cost minus register cost);
	     spill/restore adjustments are folded in below.  A final
	     negative COST means spilling A here is profitable.  */
	  cost = (ALLOCNO_MEMORY_COST (a)
		  - (ALLOCNO_HARD_REG_COSTS (a) == NULL
		     ? ALLOCNO_CLASS_COST (a)
		     : ALLOCNO_HARD_REG_COSTS (a)[index]));
	  ira_init_register_move_cost_if_necessary (mode);
	  for (subloop_node = loop_node->subloops;
	       subloop_node != NULL;
	       subloop_node = subloop_node->subloop_next)
	    {
	      ira_assert (subloop_node->bb == NULL);
	      subloop_allocno = subloop_node->regno_allocno_map[regno];
	      if (subloop_allocno == NULL)
		continue;
	      ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
	      /* We have accumulated cost.  To get the real cost of
		 allocno usage in the loop we should subtract costs of
		 the subloop allocnos.  */
	      cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
		       - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
			  ? ALLOCNO_CLASS_COST (subloop_allocno)
			  : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
	      exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
		/* Subloop allocno is in memory: spilling A removes the
		   store/load on the subloop boundary.  */
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  /* Subloop allocno is in a register: spilling A adds
		     memory moves on the subloop boundary ...  */
		  cost
		    += (ira_memory_move_cost[mode][rclass][0] * exit_freq
			+ ira_memory_move_cost[mode][rclass][1] * enter_freq);
		  /* ... but removes a register-register shuffle if the
		     two allocnos had different hard registers.  */
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  /* Symmetric adjustment for the boundary with the parent
	     region, if the pseudo lives there too.  */
	  if ((parent = loop_node->parent) != NULL
	      && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
	    {
	      ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
	      exit_freq = ira_loop_edge_freq (loop_node, regno, true);
	      enter_freq = ira_loop_edge_freq (loop_node, regno, false);
	      if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
		cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
			 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
	      else
		{
		  /* NOTE(review): the [1]/[0] indices are swapped
		     relative to the subloop case above -- on this
		     boundary the store happens on entry and the load on
		     exit; presumably intentional, matches trunk.  */
		  cost
		    += (ira_memory_move_cost[mode][rclass][1] * exit_freq
			+ ira_memory_move_cost[mode][rclass][0] * enter_freq);
		  if (hard_regno2 != hard_regno)
		    cost -= (ira_register_move_cost[mode][rclass][rclass]
			     * (exit_freq + enter_freq));
		}
	    }
	  if (cost < 0)
	    {
	      /* Profitable: move A to memory in this region.  */
	      ALLOCNO_HARD_REGNO (a) = -1;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		{
		  fprintf
		    (ira_dump_file,
		     " Moving spill/restore for a%dr%d up from loop %d",
		     ALLOCNO_NUM (a), regno, loop_node->loop_num);
		  fprintf (ira_dump_file, " - profit %d\n", -cost);
		}
	      changed_p = true;
	    }
	}
      if (! changed_p)
	break;
    }
}
3015 /* Update current hard reg costs and current conflict hard reg costs
3016 for allocno A. It is done by processing its copies containing
3017 other allocnos already assigned. */
3018 static void
3019 update_curr_costs (ira_allocno_t a)
3021 int i, hard_regno, cost;
3022 enum machine_mode mode;
3023 enum reg_class aclass, rclass;
3024 ira_allocno_t another_a;
3025 ira_copy_t cp, next_cp;
3027 ira_free_allocno_updated_costs (a);
3028 ira_assert (! ALLOCNO_ASSIGNED_P (a));
3029 aclass = ALLOCNO_CLASS (a);
3030 if (aclass == NO_REGS)
3031 return;
3032 mode = ALLOCNO_MODE (a);
3033 ira_init_register_move_cost_if_necessary (mode);
3034 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
3036 if (cp->first == a)
3038 next_cp = cp->next_first_allocno_copy;
3039 another_a = cp->second;
3041 else if (cp->second == a)
3043 next_cp = cp->next_second_allocno_copy;
3044 another_a = cp->first;
3046 else
3047 gcc_unreachable ();
3048 if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
3049 || ! ALLOCNO_ASSIGNED_P (another_a)
3050 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
3051 continue;
3052 rclass = REGNO_REG_CLASS (hard_regno);
3053 i = ira_class_hard_reg_index[aclass][hard_regno];
3054 if (i < 0)
3055 continue;
3056 cost = (cp->first == a
3057 ? ira_register_move_cost[mode][rclass][aclass]
3058 : ira_register_move_cost[mode][aclass][rclass]);
3059 ira_allocate_and_set_or_copy_costs
3060 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
3061 ALLOCNO_HARD_REG_COSTS (a));
3062 ira_allocate_and_set_or_copy_costs
3063 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
3064 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
3065 ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
3066 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
/* Try to assign hard registers to the unassigned allocnos and
   allocnos conflicting with them or conflicting with allocnos whose
   regno >= START_REGNO.  The function is called after ira_flattening,
   so more allocnos (including ones created in ira-emit.c) will have a
   chance to get a hard register.  We use a simple assignment algorithm
   based on priorities.  */
void
ira_reassign_conflict_allocnos (int start_regno)
{
  int i, allocnos_to_color_num;
  ira_allocno_t a;
  enum reg_class aclass;
  bitmap allocnos_to_color;
  ira_allocno_iterator ai;

  allocnos_to_color = ira_allocate_bitmap ();
  allocnos_to_color_num = 0;
  /* Pass 1: collect candidates into sorted_allocnos, using the bitmap
     to avoid adding the same allocno twice.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      if (! ALLOCNO_ASSIGNED_P (a)
	  && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
	{
	  if (ALLOCNO_CLASS (a) != NO_REGS)
	    sorted_allocnos[allocnos_to_color_num++] = a;
	  else
	    {
	      /* NO_REGS allocnos can never get a hard register:
		 finalize them as spilled right away.  */
	      ALLOCNO_ASSIGNED_P (a) = true;
	      ALLOCNO_HARD_REGNO (a) = -1;
	      ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
	      ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
	    }
	  bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
	}
      /* Also pull in allocnos conflicting with ones whose regno is at
	 least START_REGNO.  */
      if (ALLOCNO_REGNO (a) < start_regno
	  || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
	continue;
      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  ira_object_t conflict_obj;
	  ira_object_conflict_iterator oci;

	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	    {
	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

	      ira_assert (ira_reg_classes_intersect_p
			  [aclass][ALLOCNO_CLASS (conflict_a)]);
	      /* bitmap_set_bit returns false if the bit was already
		 set, i.e. the conflict allocno is already queued.  */
	      if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
		continue;
	      sorted_allocnos[allocnos_to_color_num++] = conflict_a;
	    }
	}
    }
  ira_free_bitmap (allocnos_to_color);
  /* Pass 2: order candidates by priority.  */
  if (allocnos_to_color_num > 1)
    {
      setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
      qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
	     allocno_priority_compare_func);
    }
  /* Pass 3: un-assign all candidates first so their mutual conflicts
     do not pin registers during reassignment, and refresh costs.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      ALLOCNO_ASSIGNED_P (a) = false;
      update_curr_costs (a);
    }
  /* Pass 4: assign in priority order.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      if (assign_hard_reg (a, true))
	{
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf
	      (ira_dump_file,
	       " Secondary allocation: assign hard reg %d to reg %d\n",
	       ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
	}
    }
}
3155 /* This page contains functions used to find conflicts using allocno
3156 live ranges. */
3158 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
3159 used to find a conflict for new allocnos or allocnos with the
3160 different allocno classes. */
3161 static bool
3162 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
3164 rtx reg1, reg2;
3165 int i, j;
3166 int n1 = ALLOCNO_NUM_OBJECTS (a1);
3167 int n2 = ALLOCNO_NUM_OBJECTS (a2);
3169 if (a1 == a2)
3170 return false;
3171 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
3172 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
3173 if (reg1 != NULL && reg2 != NULL
3174 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
3175 return false;
3177 for (i = 0; i < n1; i++)
3179 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
3181 for (j = 0; j < n2; j++)
3183 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
3185 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
3186 OBJECT_LIVE_RANGES (c2)))
3187 return true;
3190 return false;
3193 #ifdef ENABLE_IRA_CHECKING
3195 /* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
3196 intersect. This should be used when there is only one region.
3197 Currently this is used during reload. */
3198 static bool
3199 conflict_by_live_ranges_p (int regno1, int regno2)
3201 ira_allocno_t a1, a2;
3203 ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
3204 && regno2 >= FIRST_PSEUDO_REGISTER);
3205 /* Reg info caclulated by dataflow infrastructure can be different
3206 from one calculated by regclass. */
3207 if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
3208 || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
3209 return false;
3210 return allocnos_conflict_by_live_ranges_p (a1, a2);
3213 #endif
3217 /* This page contains code to coalesce memory stack slots used by
3218 spilled allocnos. This results in smaller stack frame, better data
3219 locality, and in smaller code for some architectures like
3220 x86/x86_64 where insn size depends on address displacement value.
3221 On the other hand, it can worsen insn scheduling after the RA but
3222 in practice it is less important than smaller stack frames. */
/* TRUE if we coalesced some allocnos.  In other words, if we got
   loops formed by members first_coalesced_allocno and
   next_coalesced_allocno containing more than one allocno.  */
static bool allocno_coalesced_p;

/* Bitmap used to prevent a repeated allocno processing because of
   coalescing.  */
static bitmap processed_coalesced_allocno_bitmap;

/* See below.  */
typedef struct coalesce_data *coalesce_data_t;

/* To decrease the footprint of the ira_allocno structure we store all
   data needed only for coalescing in the following structure.  */
struct coalesce_data
{
  /* Coalesced allocnos form a cyclic list.  One allocno given by
     FIRST represents all coalesced allocnos.  The list is chained by
     NEXT.  */
  ira_allocno_t first;
  ira_allocno_t next;
  /* Scratch field; coalesce_spill_slots uses it as the stack slot
     group number of the set.  */
  int temp;
};

/* Container for storing allocno data concerning coalescing; indexed
   by ALLOCNO_NUM.  */
static coalesce_data_t allocno_coalesce_data;

/* Macro to access the data concerning coalescing.  */
#define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
3254 /* The function is used to sort allocnos according to their execution
3255 frequencies. */
3256 static int
3257 copy_freq_compare_func (const void *v1p, const void *v2p)
3259 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
3260 int pri1, pri2;
3262 pri1 = cp1->freq;
3263 pri2 = cp2->freq;
3264 if (pri2 - pri1)
3265 return pri2 - pri1;
3267 /* If freqencies are equal, sort by copies, so that the results of
3268 qsort leave nothing to chance. */
3269 return cp1->num - cp2->num;
/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging the A2 set into the A1
   set).  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_COALESCE_DATA (a1)->first;
  a = ALLOCNO_COALESCE_DATA (a2)->first;
  /* Already members of the same set -- nothing to do.  */
  if (first == a)
    return;
  /* Walk A2's cyclic list: retarget every member's FIRST to the
     representative of A1's set, remembering LAST, the member just
     before A2 on the ring.  */
  for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      ALLOCNO_COALESCE_DATA (a)->first = first;
      if (a == a2)
	break;
      last = a;
    }
  /* Splice A2's ring into FIRST's ring right after FIRST.  */
  next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
  allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
  allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
}
/* Return TRUE if there are conflicting allocnos from the two sets of
   coalesced allocnos given correspondingly by allocnos A1 and A2.  We
   use live ranges to find conflicts because conflicts are represented
   only for allocnos of the same allocno class and during the reload
   pass we coalesce allocnos for sharing stack memory slots.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  if (allocno_coalesced_p)
    {
      /* Mark all members of A1's set.  NOTE(review): nothing below
	 reads this bitmap; the marking looks vestigial -- confirm
	 against history before removing.  */
      bitmap_clear (processed_coalesced_allocno_bitmap);
      for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
	  if (a == a1)
	    break;
	}
    }
  /* Check every member of A2's set against every member of A1's set;
     each cyclic list is walked starting at the representative's NEXT
     and ends after processing the representative itself.  */
  for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
	   conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
	{
	  if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
	    return true;
	  if (conflict_a == a1)
	    break;
	}
      if (a == a2)
	break;
    }
  return false;
}
/* The major function for aggressive allocno coalescing.  We coalesce
   only spilled allocnos.  If some allocnos have been coalesced, we
   set up flag allocno_coalesced_p.  */
static void
coalesce_allocnos (void)
{
  ira_allocno_t a;
  ira_copy_t cp, next_cp, *sorted_copies;
  unsigned int j;
  int i, n, cp_num, regno;
  bitmap_iterator bi;

  sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
					       * sizeof (ira_copy_t));
  cp_num = 0;
  /* Collect copies whose both sides are spilled allocnos eligible for
     coalescing.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      regno = ALLOCNO_REGNO (a);
      if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
	  || ira_equiv_no_lvalue_p (regno))
	continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
	{
	  /* Each copy is visited from both sides; only record it when
	     A is the copy's FIRST, so it is recorded once.  */
	  if (cp->first == a)
	    {
	      next_cp = cp->next_first_allocno_copy;
	      regno = ALLOCNO_REGNO (cp->second);
	      /* For priority coloring we coalesce allocnos only with
		 the same allocno class not with intersected allocno
		 classes as it were possible.  It is done for
		 simplicity.  */
	      if ((cp->insn != NULL || cp->constraint_p)
		  && ALLOCNO_ASSIGNED_P (cp->second)
		  && ALLOCNO_HARD_REGNO (cp->second) < 0
		  && ! ira_equiv_no_lvalue_p (regno))
		sorted_copies[cp_num++] = cp;
	    }
	  else if (cp->second == a)
	    next_cp = cp->next_second_allocno_copy;
	  else
	    gcc_unreachable ();
	}
    }
  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Coalesce copies, most frequently executed first.  After each
     successful merge, restart: keep only copies whose two sides are
     still in different coalesced sets.  */
  for (; cp_num != 0;)
    {
      /* Find and merge the first (most frequent) non-conflicting
	 copy.  */
      for (i = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (! coalesced_allocno_conflict_p (cp->first, cp->second))
	    {
	      allocno_coalesced_p = true;
	      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
		fprintf
		  (ira_dump_file,
		   " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
		   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
		   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
		   cp->freq);
	      merge_allocnos (cp->first, cp->second);
	      i++;
	      break;
	    }
	}
      /* Collect the rest of the copies (those after the merged one
	 whose sides are still in different sets); if no merge
	 happened, I == CP_NUM and the list becomes empty.  */
      for (n = 0; i < cp_num; i++)
	{
	  cp = sorted_copies[i];
	  if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
	      != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
	    sorted_copies[n++] = cp;
	}
      cp_num = n;
    }
  ira_free (sorted_copies);
}
/* Usage cost and order number of the coalesced allocno set to which
   a given pseudo register belongs, indexed by regno.  Filled by
   setup_coalesced_allocno_costs_and_nums and consumed by the
   freq-compare qsort callback.  */
static int *regno_coalesced_allocno_cost;
static int *regno_coalesced_allocno_num;
3420 /* Sort pseudos according frequencies of coalesced allocno sets they
3421 belong to (putting most frequently ones first), and according to
3422 coalesced allocno set order numbers. */
3423 static int
3424 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
3426 const int regno1 = *(const int *) v1p;
3427 const int regno2 = *(const int *) v2p;
3428 int diff;
3430 if ((diff = (regno_coalesced_allocno_cost[regno2]
3431 - regno_coalesced_allocno_cost[regno1])) != 0)
3432 return diff;
3433 if ((diff = (regno_coalesced_allocno_num[regno1]
3434 - regno_coalesced_allocno_num[regno2])) != 0)
3435 return diff;
3436 return regno1 - regno2;
/* Widest width in which each pseudo reg is referred to (via subreg).
   It is used for sorting pseudo registers; set from the reload's
   reg_max_ref_width array in ira_sort_regnos_for_alter_reg.  */
static unsigned int *regno_max_ref_width;

/* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1 so it can be used
   in ordinary boolean expressions below.  */
#ifdef STACK_GROWS_DOWNWARD
# undef STACK_GROWS_DOWNWARD
# define STACK_GROWS_DOWNWARD 1
#else
# define STACK_GROWS_DOWNWARD 0
#endif
3451 /* Sort pseudos according their slot numbers (putting ones with
3452 smaller numbers first, or last when the frame pointer is not
3453 needed). */
3454 static int
3455 coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
3457 const int regno1 = *(const int *) v1p;
3458 const int regno2 = *(const int *) v2p;
3459 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
3460 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
3461 int diff, slot_num1, slot_num2;
3462 int total_size1, total_size2;
3464 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
3466 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
3467 return regno1 - regno2;
3468 return 1;
3470 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
3471 return -1;
3472 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
3473 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
3474 if ((diff = slot_num1 - slot_num2) != 0)
3475 return (frame_pointer_needed
3476 || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
3477 total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
3478 regno_max_ref_width[regno1]);
3479 total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
3480 regno_max_ref_width[regno2]);
3481 if ((diff = total_size2 - total_size1) != 0)
3482 return diff;
3483 return regno1 - regno2;
/* Set up REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
   for coalesced allocno sets containing allocnos with their regnos
   given in array PSEUDO_REGNOS of length N.  */
static void
setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
{
  int i, num, regno, cost;
  ira_allocno_t allocno, a;

  for (num = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      if (allocno == NULL)
	{
	  /* Pseudo without an allocno: zero cost, unique set number.  */
	  regno_coalesced_allocno_cost[regno] = 0;
	  regno_coalesced_allocno_num[regno] = ++num;
	  continue;
	}
      /* Process each coalesced set once, via its representative.  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
	continue;
      num++;
      /* First walk of the cyclic list: sum the frequencies of all
	 members.  */
      for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  cost += ALLOCNO_FREQ (a);
	  if (a == allocno)
	    break;
	}
      /* Second walk: store the set number and total cost for every
	 member's regno.  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
	  regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
	  if (a == allocno)
	    break;
	}
    }
}
3526 /* Collect spilled allocnos representing coalesced allocno sets (the
3527 first coalesced allocno). The collected allocnos are returned
3528 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3529 number of the collected allocnos. The allocnos are given by their
3530 regnos in array PSEUDO_REGNOS of length N. */
3531 static int
3532 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
3533 ira_allocno_t *spilled_coalesced_allocnos)
3535 int i, num, regno;
3536 ira_allocno_t allocno;
3538 for (num = i = 0; i < n; i++)
3540 regno = pseudo_regnos[i];
3541 allocno = ira_regno_allocno_map[regno];
3542 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
3543 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3544 continue;
3545 spilled_coalesced_allocnos[num++] = allocno;
3547 return num;
/* Array of live ranges of size IRA_ALLOCNOS_NUM.  The live range for
   a given slot contains the merged live ranges of all coalesced
   allocnos assigned to that slot; allocated and freed in
   coalesce_spill_slots.  */
static live_range_t *slot_coalesced_allocnos_live_ranges;
/* Return TRUE if the coalesced allocnos represented by ALLOCNO have
   live ranges intersecting the live ranges of the coalesced allocnos
   already assigned to the slot with number N.  */
static bool
slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
{
  ira_allocno_t a;

  /* Walk the cyclic list of ALLOCNO's coalesced set; the walk starts
     at the representative's NEXT and ends after ALLOCNO itself.  */
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int i;
      int nr = ALLOCNO_NUM_OBJECTS (a);

      /* Any object of any member intersecting the slot's merged
	 ranges means the slot cannot be shared.  */
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);

	  if (ira_live_ranges_intersect_p
	      (slot_coalesced_allocnos_live_ranges[n],
	       OBJECT_LIVE_RANGES (obj)))
	    return true;
	}
      if (a == allocno)
	break;
    }
  return false;
}
/* Update the live ranges of the slot to which the coalesced allocnos
   represented by ALLOCNO were assigned (the slot number is kept in
   the set's TEMP field).  */
static void
setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
{
  int i, n;
  ira_allocno_t a;
  live_range_t r;

  n = ALLOCNO_COALESCE_DATA (allocno)->temp;
  /* Merge the ranges of every object of every member of ALLOCNO's
     coalesced set into the slot's accumulated range list.  */
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int nr = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < nr; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);

	  /* Copy before merging: ira_merge_live_ranges consumes its
	     arguments and the object must keep its own list.  */
	  r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
	  slot_coalesced_allocnos_live_ranges[n]
	    = ira_merge_live_ranges
	      (slot_coalesced_allocnos_live_ranges[n], r);
	}
      if (a == allocno)
	break;
    }
}
/* We have coalesced allocnos involved in copies.  Coalesce allocnos
   further in order to share the same memory stack slot.  Allocnos
   representing sets of allocnos coalesced before the call are given
   in array SPILLED_COALESCED_ALLOCNOS of length NUM.  Return TRUE if
   some allocnos were coalesced in the function.  */
static bool
coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
{
  int i, j, n, last_coalesced_allocno_num;
  ira_allocno_t allocno, a;
  bool merged_p = false;
  bitmap set_jump_crosses = regstat_get_setjmp_crosses ();

  slot_coalesced_allocnos_live_ranges
    = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
  memset (slot_coalesced_allocnos_live_ranges, 0,
	  sizeof (live_range_t) * ira_allocnos_num);
  last_coalesced_allocno_num = 0;
  /* Coalesce non-conflicting spilled allocnos preferring most
     frequently used (the input array is sorted by frequency).  */
  for (i = 0; i < num; i++)
    {
      allocno = spilled_coalesced_allocnos[i];
      /* Skip non-representatives and pseudos that must keep their own
	 slot (setjmp-crossing or with an equivalence).  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
	  || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
	  || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
	continue;
      /* Look for an earlier (more frequent) set whose slot's live
	 ranges do not intersect ALLOCNO's.  */
      for (j = 0; j < i; j++)
	{
	  a = spilled_coalesced_allocnos[j];
	  n = ALLOCNO_COALESCE_DATA (a)->temp;
	  if (ALLOCNO_COALESCE_DATA (a)->first == a
	      && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
	      && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
	      && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
	    break;
	}
      if (j >= i)
	{
	  /* No coalescing: set up the number for the coalesced
	     allocnos represented by ALLOCNO.  */
	  ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
	  setup_slot_coalesced_allocno_live_ranges (allocno);
	}
      else
	{
	  allocno_coalesced_p = true;
	  merged_p = true;
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file,
		     " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
		     ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
		     ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
	  /* Adopt A's slot number, add ALLOCNO's ranges to that slot,
	     then merge the sets (A stays the representative).  */
	  ALLOCNO_COALESCE_DATA (allocno)->temp
	    = ALLOCNO_COALESCE_DATA (a)->temp;
	  setup_slot_coalesced_allocno_live_ranges (allocno);
	  merge_allocnos (a, allocno);
	  ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
	}
    }
  for (i = 0; i < ira_allocnos_num; i++)
    ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
  ira_free (slot_coalesced_allocnos_live_ranges);
  return merged_p;
}
/* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
   subsequent assigning stack slots to them in the reload pass.  To do
   this we coalesce spilled allocnos first to decrease the number of
   memory-memory move insns.  This function is called by the
   reload.  REG_MAX_REF_WIDTH gives the widest (subreg) reference
   width of each pseudo.  */
void
ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
			       unsigned int *reg_max_ref_width)
{
  int max_regno = max_reg_num ();
  int i, regno, num, slot_num;
  ira_allocno_t allocno, a;
  ira_allocno_iterator ai;
  ira_allocno_t *spilled_coalesced_allocnos;

  /* Set up the allocnos that can be coalesced.  */
  coloring_allocno_bitmap = ira_allocate_bitmap ();
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      if (allocno != NULL)
	bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
    }
  allocno_coalesced_p = false;
  processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
  allocno_coalesce_data
    = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
				      * ira_allocnos_num);
  /* Initialize coalesce data for allocnos: each starts as a singleton
     cyclic list with itself as representative.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
      ALLOCNO_COALESCE_DATA (a)->first = a;
      ALLOCNO_COALESCE_DATA (a)->next = a;
    }
  coalesce_allocnos ();
  ira_free_bitmap (coloring_allocno_bitmap);
  regno_coalesced_allocno_cost
    = (int *) ira_allocate (max_regno * sizeof (int));
  regno_coalesced_allocno_num
    = (int *) ira_allocate (max_regno * sizeof (int));
  memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
  setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
  /* Sort regnos according to the frequencies of the corresponding
     coalesced allocno sets.  */
  qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
  spilled_coalesced_allocnos
    = (ira_allocno_t *) ira_allocate (ira_allocnos_num
				      * sizeof (ira_allocno_t));
  /* Collect allocnos representing the spilled coalesced allocno
     sets.  */
  num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
					    spilled_coalesced_allocnos);
  if (flag_ira_share_spill_slots
      && coalesce_spill_slots (spilled_coalesced_allocnos, num))
    {
      /* Slot sharing merged more sets: recompute costs/numbers,
	 re-sort and re-collect the representatives.  */
      setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
      qsort (pseudo_regnos, n, sizeof (int),
	     coalesced_pseudo_reg_freq_compare);
      num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
						spilled_coalesced_allocnos);
    }
  ira_free_bitmap (processed_coalesced_allocno_bitmap);
  allocno_coalesced_p = false;
  /* Assign stack slot numbers to spilled allocno sets, using smaller
     numbers for the most frequently used coalesced allocnos.  -1 is
     reserved for dynamic search of stack slots for pseudos spilled by
     the reload.  */
  slot_num = 1;
  for (i = 0; i < num; i++)
    {
      allocno = spilled_coalesced_allocnos[i];
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
	  || ALLOCNO_HARD_REGNO (allocno) >= 0
	  || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
	continue;
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
      slot_num++;
      /* Record the slot as a negated slot number in each member's
	 hard regno field (understood by reload).  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
	   a = ALLOCNO_COALESCE_DATA (a)->next)
	{
	  ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
	  ALLOCNO_HARD_REGNO (a) = -slot_num;
	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	    fprintf (ira_dump_file, " a%dr%d(%d,%d)",
		     ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
		     MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
			  reg_max_ref_width[ALLOCNO_REGNO (a)]));
	  if (a == allocno)
	    break;
	}
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, "\n");
    }
  ira_spilled_reg_stack_slots_num = slot_num - 1;
  ira_free (spilled_coalesced_allocnos);
  /* Sort regnos according to the slot numbers.  */
  regno_max_ref_width = reg_max_ref_width;
  qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
  FOR_EACH_ALLOCNO (a, ai)
    ALLOCNO_ADD_DATA (a) = NULL;
  ira_free (allocno_coalesce_data);
  ira_free (regno_coalesced_allocno_num);
  ira_free (regno_coalesced_allocno_cost);
}
3789 /* This page contains code used by the reload pass to improve the
3790 final code. */
/* The function is called from reload to mark changes in the
   allocation of REGNO made by the reload.  Remember that reg_renumber
   reflects the change result.  Keeps ira_overall_cost in sync with
   the new assignment.  */
void
ira_mark_allocation_change (int regno)
{
  ira_allocno_t a = ira_regno_allocno_map[regno];
  int old_hard_regno, hard_regno, cost;
  enum reg_class aclass = ALLOCNO_CLASS (a);

  ira_assert (a != NULL);
  hard_regno = reg_renumber[regno];
  if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
    return;
  /* COST is first set to minus the cost of the old location ...  */
  if (old_hard_regno < 0)
    cost = -ALLOCNO_MEMORY_COST (a);
  else
    {
      ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
      cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)
	         [ira_class_hard_reg_index[aclass][old_hard_regno]]);
      /* Undo the copy-cost propagation done for the old register.  */
      update_copy_costs (a, false);
    }
  ira_overall_cost -= cost;
  ALLOCNO_HARD_REGNO (a) = hard_regno;
  if (hard_regno < 0)
    {
      ALLOCNO_HARD_REGNO (a) = -1;
      /* ... then the cost of the new location is added, so COST ends
	 up as (new - old).  */
      cost += ALLOCNO_MEMORY_COST (a);
    }
  else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
    {
      cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)
	         [ira_class_hard_reg_index[aclass][hard_regno]]);
      update_copy_costs (a, true);
    }
  else
    /* Reload changed class of the allocno.  */
    cost = 0;
  ira_overall_cost += cost;
}
3838 /* This function is called when reload deletes memory-memory move. In
3839 this case we marks that the allocation of the corresponding
3840 allocnos should be not changed in future. Otherwise we risk to get
3841 a wrong code. */
3842 void
3843 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
3845 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
3846 ira_allocno_t src = ira_regno_allocno_map[src_regno];
3848 ira_assert (dst != NULL && src != NULL
3849 && ALLOCNO_HARD_REGNO (dst) < 0
3850 && ALLOCNO_HARD_REGNO (src) < 0);
3851 ALLOCNO_DONT_REASSIGN_P (dst) = true;
3852 ALLOCNO_DONT_REASSIGN_P (src) = true;
/* Try to assign a hard register (except for FORBIDDEN_REGS) to
   allocno A and return TRUE in the case of success.  On success also
   updates reg_renumber, the pseudo's RTL and ira_overall_cost.  */
static bool
allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
{
  int hard_regno;
  enum reg_class aclass;
  int regno = ALLOCNO_REGNO (a);
  /* Saved conflict sets, one per object.  NOTE(review): relies on an
     allocno having at most 2 objects -- confirm against
     ALLOCNO_NUM_OBJECTS invariants.  */
  HARD_REG_SET saved[2];
  int i, n;

  /* Temporarily add FORBIDDEN_REGS (and call-clobbered registers when
     caller saving is off and A crosses calls) to each object's total
     conflict set; the originals are restored before returning.  */
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
      if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  call_used_reg_set);
    }
  ALLOCNO_ASSIGNED_P (a) = false;
  aclass = ALLOCNO_CLASS (a);
  update_curr_costs (a);
  assign_hard_reg (a, true);
  hard_regno = ALLOCNO_HARD_REGNO (a);
  reg_renumber[regno] = hard_regno;
  if (hard_regno < 0)
    ALLOCNO_HARD_REGNO (a) = -1;
  else
    {
      ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
      /* A moved from memory to a register: subtract the saved cost.  */
      ira_overall_cost
	-= (ALLOCNO_MEMORY_COST (a)
	    - (ALLOCNO_HARD_REG_COSTS (a) == NULL
	       ? ALLOCNO_CLASS_COST (a)
	       : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
					    [aclass][hard_regno]]));
      if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
	  && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
					      call_used_reg_set))
	{
	  ira_assert (flag_caller_saves);
	  caller_save_needed = 1;
	}
    }

  /* If we found a hard register, modify the RTL for the pseudo
     register to show the hard register, and mark the pseudo register
     live.  */
  if (reg_renumber[regno] >= 0)
    {
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
	fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
      SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
      mark_home_live (regno);
    }
  else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "\n");
  /* Restore the original conflict sets.  */
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
    }
  return reg_renumber[regno] >= 0;
}
3922 /* Sort pseudos according their usage frequencies (putting most
3923 frequently ones first). */
3924 static int
3925 pseudo_reg_compare (const void *v1p, const void *v2p)
3927 int regno1 = *(const int *) v1p;
3928 int regno2 = *(const int *) v2p;
3929 int diff;
3931 if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
3932 return diff;
3933 return regno1 - regno2;
3936 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
3937 NUM of them) or spilled pseudos conflicting with pseudos in
3938 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
3939 allocation has been changed. The function doesn't use
3940 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
3941 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
3942 is called by the reload pass at the end of each reload
3943 iteration. */
3944 bool
3945 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
3946 HARD_REG_SET bad_spill_regs,
3947 HARD_REG_SET *pseudo_forbidden_regs,
3948 HARD_REG_SET *pseudo_previous_regs,
3949 bitmap spilled)
3951 int i, n, regno;
3952 bool changed_p;
3953 ira_allocno_t a;
3954 HARD_REG_SET forbidden_regs;
3955 bitmap temp = BITMAP_ALLOC (NULL);
3957 /* Add pseudos which conflict with pseudos already in
3958 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
3959 to allocating in two steps as some of the conflicts might have
3960 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
3961 for (i = 0; i < num; i++)
3962 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
3964 for (i = 0, n = num; i < n; i++)
3966 int nr, j;
3967 int regno = spilled_pseudo_regs[i];
3968 bitmap_set_bit (temp, regno);
3970 a = ira_regno_allocno_map[regno];
3971 nr = ALLOCNO_NUM_OBJECTS (a);
3972 for (j = 0; j < nr; j++)
3974 ira_object_t conflict_obj;
3975 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3976 ira_object_conflict_iterator oci;
3978 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
3980 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
3981 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
3982 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
3983 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
3985 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
3986 /* ?!? This seems wrong. */
3987 bitmap_set_bit (consideration_allocno_bitmap,
3988 ALLOCNO_NUM (conflict_a));
3994 if (num > 1)
3995 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
3996 changed_p = false;
3997 /* Try to assign hard registers to pseudos from
3998 SPILLED_PSEUDO_REGS. */
3999 for (i = 0; i < num; i++)
4001 regno = spilled_pseudo_regs[i];
4002 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
4003 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
4004 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
4005 gcc_assert (reg_renumber[regno] < 0);
4006 a = ira_regno_allocno_map[regno];
4007 ira_mark_allocation_change (regno);
4008 ira_assert (reg_renumber[regno] < 0);
4009 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4010 fprintf (ira_dump_file,
4011 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
4012 ALLOCNO_MEMORY_COST (a)
4013 - ALLOCNO_CLASS_COST (a));
4014 allocno_reload_assign (a, forbidden_regs);
4015 if (reg_renumber[regno] >= 0)
4017 CLEAR_REGNO_REG_SET (spilled, regno);
4018 changed_p = true;
4021 BITMAP_FREE (temp);
4022 return changed_p;
4025 /* The function is called by reload and returns already allocated
4026 stack slot (if any) for REGNO with given INHERENT_SIZE and
4027 TOTAL_SIZE. In the case of failure to find a slot which can be
4028 used for REGNO, the function returns NULL. */
4030 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
4031 unsigned int total_size)
4033 unsigned int i;
4034 int slot_num, best_slot_num;
4035 int cost, best_cost;
4036 ira_copy_t cp, next_cp;
4037 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
4038 rtx x;
4039 bitmap_iterator bi;
4040 struct ira_spilled_reg_stack_slot *slot = NULL;
4042 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
4043 && inherent_size <= total_size
4044 && ALLOCNO_HARD_REGNO (allocno) < 0);
4045 if (! flag_ira_share_spill_slots)
4046 return NULL_RTX;
4047 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4048 if (slot_num != -1)
4050 slot = &ira_spilled_reg_stack_slots[slot_num];
4051 x = slot->mem;
4053 else
4055 best_cost = best_slot_num = -1;
4056 x = NULL_RTX;
4057 /* It means that the pseudo was spilled in the reload pass, try
4058 to reuse a slot. */
4059 for (slot_num = 0;
4060 slot_num < ira_spilled_reg_stack_slots_num;
4061 slot_num++)
4063 slot = &ira_spilled_reg_stack_slots[slot_num];
4064 if (slot->mem == NULL_RTX)
4065 continue;
4066 if (slot->width < total_size
4067 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
4068 continue;
4070 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4071 FIRST_PSEUDO_REGISTER, i, bi)
4073 another_allocno = ira_regno_allocno_map[i];
4074 if (allocnos_conflict_by_live_ranges_p (allocno,
4075 another_allocno))
4076 goto cont;
4078 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
4079 cp != NULL;
4080 cp = next_cp)
4082 if (cp->first == allocno)
4084 next_cp = cp->next_first_allocno_copy;
4085 another_allocno = cp->second;
4087 else if (cp->second == allocno)
4089 next_cp = cp->next_second_allocno_copy;
4090 another_allocno = cp->first;
4092 else
4093 gcc_unreachable ();
4094 if (cp->insn == NULL_RTX)
4095 continue;
4096 if (bitmap_bit_p (&slot->spilled_regs,
4097 ALLOCNO_REGNO (another_allocno)))
4098 cost += cp->freq;
4100 if (cost > best_cost)
4102 best_cost = cost;
4103 best_slot_num = slot_num;
4105 cont:
4108 if (best_cost >= 0)
4110 slot_num = best_slot_num;
4111 slot = &ira_spilled_reg_stack_slots[slot_num];
4112 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4113 x = slot->mem;
4114 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4117 if (x != NULL_RTX)
4119 ira_assert (slot->width >= total_size);
4120 #ifdef ENABLE_IRA_CHECKING
4121 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4122 FIRST_PSEUDO_REGISTER, i, bi)
4124 ira_assert (! conflict_by_live_ranges_p (regno, i));
4126 #endif
4127 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4128 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4130 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
4131 regno, REG_FREQ (regno), slot_num);
4132 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4133 FIRST_PSEUDO_REGISTER, i, bi)
4135 if ((unsigned) regno != i)
4136 fprintf (ira_dump_file, " %d", i);
4138 fprintf (ira_dump_file, "\n");
4141 return x;
4144 /* This is called by reload every time a new stack slot X with
4145 TOTAL_SIZE was allocated for REGNO. We store this info for
4146 subsequent ira_reuse_stack_slot calls. */
4147 void
4148 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
4150 struct ira_spilled_reg_stack_slot *slot;
4151 int slot_num;
4152 ira_allocno_t allocno;
4154 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
4155 allocno = ira_regno_allocno_map[regno];
4156 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4157 if (slot_num == -1)
4159 slot_num = ira_spilled_reg_stack_slots_num++;
4160 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4162 slot = &ira_spilled_reg_stack_slots[slot_num];
4163 INIT_REG_SET (&slot->spilled_regs);
4164 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4165 slot->mem = x;
4166 slot->width = total_size;
4167 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4168 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
4169 regno, REG_FREQ (regno), slot_num);
4173 /* Return spill cost for pseudo-registers whose numbers are in array
4174 REGNOS (with a negative number as an end marker) for reload with
4175 given IN and OUT for INSN. Return also number points (through
4176 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4177 the register pressure is high, number of references of the
4178 pseudo-registers (through NREFS), number of callee-clobbered
4179 hard-registers occupied by the pseudo-registers (through
4180 CALL_USED_COUNT), and the first hard regno occupied by the
4181 pseudo-registers (through FIRST_HARD_REGNO). */
4182 static int
4183 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
4184 int *excess_pressure_live_length,
4185 int *nrefs, int *call_used_count, int *first_hard_regno)
4187 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
4188 bool in_p, out_p;
4189 int length;
4190 ira_allocno_t a;
4192 *nrefs = 0;
4193 for (length = count = cost = i = 0;; i++)
4195 regno = regnos[i];
4196 if (regno < 0)
4197 break;
4198 *nrefs += REG_N_REFS (regno);
4199 hard_regno = reg_renumber[regno];
4200 ira_assert (hard_regno >= 0);
4201 a = ira_regno_allocno_map[regno];
4202 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
4203 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
4204 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
4205 for (j = 0; j < nregs; j++)
4206 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
4207 break;
4208 if (j == nregs)
4209 count++;
4210 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
4211 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
4212 if ((in_p || out_p)
4213 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
4215 saved_cost = 0;
4216 if (in_p)
4217 saved_cost += ira_memory_move_cost
4218 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
4219 if (out_p)
4220 saved_cost
4221 += ira_memory_move_cost
4222 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
4223 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
4226 *excess_pressure_live_length = length;
4227 *call_used_count = count;
4228 hard_regno = -1;
4229 if (regnos[0] >= 0)
4231 hard_regno = reg_renumber[regnos[0]];
4233 *first_hard_regno = hard_regno;
4234 return cost;
4237 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4238 REGNOS is better than spilling pseudo-registers with numbers in
4239 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4240 function used by the reload pass to make better register spilling
4241 decisions. */
4242 bool
4243 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
4244 rtx in, rtx out, rtx insn)
4246 int cost, other_cost;
4247 int length, other_length;
4248 int nrefs, other_nrefs;
4249 int call_used_count, other_call_used_count;
4250 int hard_regno, other_hard_regno;
4252 cost = calculate_spill_cost (regnos, in, out, insn,
4253 &length, &nrefs, &call_used_count, &hard_regno);
4254 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
4255 &other_length, &other_nrefs,
4256 &other_call_used_count,
4257 &other_hard_regno);
4258 if (nrefs == 0 && other_nrefs != 0)
4259 return true;
4260 if (nrefs != 0 && other_nrefs == 0)
4261 return false;
4262 if (cost != other_cost)
4263 return cost < other_cost;
4264 if (length != other_length)
4265 return length > other_length;
4266 #ifdef REG_ALLOC_ORDER
4267 if (hard_regno >= 0 && other_hard_regno >= 0)
4268 return (inv_reg_alloc_order[hard_regno]
4269 < inv_reg_alloc_order[other_hard_regno]);
4270 #else
4271 if (call_used_count != other_call_used_count)
4272 return call_used_count > other_call_used_count;
4273 #endif
4274 return false;
4279 /* Allocate and initialize data necessary for assign_hard_reg. */
4280 void
4281 ira_initiate_assign (void)
4283 sorted_allocnos
4284 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4285 * ira_allocnos_num);
4286 consideration_allocno_bitmap = ira_allocate_bitmap ();
4287 initiate_cost_update ();
4288 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4291 /* Deallocate data used by assign_hard_reg. */
4292 void
4293 ira_finish_assign (void)
4295 ira_free (sorted_allocnos);
4296 ira_free_bitmap (consideration_allocno_bitmap);
4297 finish_cost_update ();
4298 ira_free (allocno_priorities);
4303 /* Entry function doing color-based register allocation. */
4304 static void
4305 color (void)
4307 allocno_stack_vec.create (ira_allocnos_num);
4308 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
4309 ira_initiate_assign ();
4310 do_coloring ();
4311 ira_finish_assign ();
4312 allocno_stack_vec.release ();
4313 move_spill_restore ();
4318 /* This page contains a simple register allocator without usage of
4319 allocno conflicts. This is used for fast allocation for -O0. */
4321 /* Do register allocation by not using allocno conflicts. It uses
4322 only allocno live ranges. The algorithm is close to Chow's
4323 priority coloring. */
4324 static void
4325 fast_allocation (void)
4327 int i, j, k, num, class_size, hard_regno;
4328 #ifdef STACK_REGS
4329 bool no_stack_reg_p;
4330 #endif
4331 enum reg_class aclass;
4332 enum machine_mode mode;
4333 ira_allocno_t a;
4334 ira_allocno_iterator ai;
4335 live_range_t r;
4336 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
4338 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4339 * ira_allocnos_num);
4340 num = 0;
4341 FOR_EACH_ALLOCNO (a, ai)
4342 sorted_allocnos[num++] = a;
4343 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4344 setup_allocno_priorities (sorted_allocnos, num);
4345 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
4346 * ira_max_point);
4347 for (i = 0; i < ira_max_point; i++)
4348 CLEAR_HARD_REG_SET (used_hard_regs[i]);
4349 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
4350 allocno_priority_compare_func);
4351 for (i = 0; i < num; i++)
4353 int nr, l;
4355 a = sorted_allocnos[i];
4356 nr = ALLOCNO_NUM_OBJECTS (a);
4357 CLEAR_HARD_REG_SET (conflict_hard_regs);
4358 for (l = 0; l < nr; l++)
4360 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4361 IOR_HARD_REG_SET (conflict_hard_regs,
4362 OBJECT_CONFLICT_HARD_REGS (obj));
4363 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4364 for (j = r->start; j <= r->finish; j++)
4365 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
4367 aclass = ALLOCNO_CLASS (a);
4368 ALLOCNO_ASSIGNED_P (a) = true;
4369 ALLOCNO_HARD_REGNO (a) = -1;
4370 if (hard_reg_set_subset_p (reg_class_contents[aclass],
4371 conflict_hard_regs))
4372 continue;
4373 mode = ALLOCNO_MODE (a);
4374 #ifdef STACK_REGS
4375 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
4376 #endif
4377 class_size = ira_class_hard_regs_num[aclass];
4378 for (j = 0; j < class_size; j++)
4380 hard_regno = ira_class_hard_regs[aclass][j];
4381 #ifdef STACK_REGS
4382 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
4383 && hard_regno <= LAST_STACK_REG)
4384 continue;
4385 #endif
4386 if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
4387 || (TEST_HARD_REG_BIT
4388 (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
4389 continue;
4390 ALLOCNO_HARD_REGNO (a) = hard_regno;
4391 for (l = 0; l < nr; l++)
4393 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4394 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4395 for (k = r->start; k <= r->finish; k++)
4396 IOR_HARD_REG_SET (used_hard_regs[k],
4397 ira_reg_mode_hard_regset[hard_regno][mode]);
4399 break;
4402 ira_free (sorted_allocnos);
4403 ira_free (used_hard_regs);
4404 ira_free (allocno_priorities);
4405 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
4406 ira_print_disposition (ira_dump_file);
4411 /* Entry function doing coloring. */
4412 void
4413 ira_color (void)
4415 ira_allocno_t a;
4416 ira_allocno_iterator ai;
4418 /* Setup updated costs. */
4419 FOR_EACH_ALLOCNO (a, ai)
4421 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
4422 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
4424 if (ira_conflicts_p)
4425 color ();
4426 else
4427 fast_allocation ();