* lto-partition.c (add_symbol_to_partition_1,
[official-gcc.git] / gcc / ira-color.c
blobc20aaf72dc6097bffb81c89b948d67125198b146
1 /* IRA allocation based on graph coloring.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
3 Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "target.h"
28 #include "regs.h"
29 #include "flags.h"
30 #include "sbitmap.h"
31 #include "bitmap.h"
32 #include "hash-table.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "reload.h"
38 #include "params.h"
39 #include "df.h"
40 #include "ira-int.h"
42 typedef struct allocno_hard_regs *allocno_hard_regs_t;
44 /* The structure contains information about hard registers can be
45 assigned to allocnos. Usually it is allocno profitable hard
46 registers but in some cases this set can be a bit different. Major
47 reason of the difference is a requirement to use hard register sets
48 that form a tree or a forest (set of trees), i.e. hard register set
49 of a node should contain hard register sets of its subnodes. */
50 struct allocno_hard_regs
52 /* Hard registers can be assigned to an allocno. */
53 HARD_REG_SET set;
54 /* Overall (spilling) cost of all allocnos with given register
55 set. */
56 HOST_WIDEST_INT cost;
59 typedef struct allocno_hard_regs_node *allocno_hard_regs_node_t;
61 /* A node representing allocno hard registers. Such nodes form a
62 forest (set of trees). Each subnode of given node in the forest
63 refers for hard register set (usually allocno profitable hard
64 register set) which is a subset of one referred from given
65 node. */
66 struct allocno_hard_regs_node
68 /* Set up number of the node in preorder traversing of the forest. */
69 int preorder_num;
70 /* Used for different calculation like finding conflict size of an
71 allocno. */
72 int check;
73 /* Used for calculation of conflict size of an allocno. The
74 conflict size of the allocno is maximal number of given allocno
75 hard registers needed for allocation of the conflicting allocnos.
76 Given allocno is trivially colored if this number plus the number
77 of hard registers needed for given allocno is not greater than
78 the number of given allocno hard register set. */
79 int conflict_size;
80 /* The number of hard registers given by member hard_regs. */
81 int hard_regs_num;
82 /* The following member is used to form the final forest. */
83 bool used_p;
84 /* Pointer to the corresponding profitable hard registers. */
85 allocno_hard_regs_t hard_regs;
86 /* Parent, first subnode, previous and next node with the same
87 parent in the forest. */
88 allocno_hard_regs_node_t parent, first, prev, next;
/* Info about changing hard reg costs of an allocno.  */
struct update_cost_record
{
  /* Hard regno for which we changed the cost.  */
  int hard_regno;
  /* Divisor used when we changed the cost of HARD_REGNO.  */
  int divisor;
  /* Next record for given allocno.  */
  struct update_cost_record *next;
};
102 /* To decrease footprint of ira_allocno structure we store all data
103 needed only for coloring in the following structure. */
104 struct allocno_color_data
106 /* TRUE value means that the allocno was not removed yet from the
107 conflicting graph during colouring. */
108 unsigned int in_graph_p : 1;
109 /* TRUE if it is put on the stack to make other allocnos
110 colorable. */
111 unsigned int may_be_spilled_p : 1;
112 /* TRUE if the allocno is trivially colorable. */
113 unsigned int colorable_p : 1;
114 /* Number of hard registers of the allocno class really
115 available for the allocno allocation. It is number of the
116 profitable hard regs. */
117 int available_regs_num;
118 /* Allocnos in a bucket (used in coloring) chained by the following
119 two members. */
120 ira_allocno_t next_bucket_allocno;
121 ira_allocno_t prev_bucket_allocno;
122 /* Used for temporary purposes. */
123 int temp;
124 /* Used to exclude repeated processing. */
125 int last_process;
126 /* Profitable hard regs available for this pseudo allocation. It
127 means that the set excludes unavailable hard regs and hard regs
128 conflicting with given pseudo. They should be of the allocno
129 class. */
130 HARD_REG_SET profitable_hard_regs;
131 /* The allocno hard registers node. */
132 allocno_hard_regs_node_t hard_regs_node;
133 /* Array of structures allocno_hard_regs_subnode representing
134 given allocno hard registers node (the 1st element in the array)
135 and all its subnodes in the tree (forest) of allocno hard
136 register nodes (see comments above). */
137 int hard_regs_subnodes_start;
138 /* The length of the previous array. */
139 int hard_regs_subnodes_num;
140 /* Records about updating allocno hard reg costs from copies. If
141 the allocno did not get expected hard register, these records are
142 used to restore original hard reg costs of allocnos connected to
143 this allocno by copies. */
144 struct update_cost_record *update_cost_records;
145 /* Threads. We collect allocnos connected by copies into threads
146 and try to assign hard regs to allocnos by threads. */
147 /* Allocno representing all thread. */
148 ira_allocno_t first_thread_allocno;
149 /* Allocnos in thread forms a cycle list through the following
150 member. */
151 ira_allocno_t next_thread_allocno;
152 /* All thread frequency. Defined only for first thread allocno. */
153 int thread_freq;
156 /* See above. */
157 typedef struct allocno_color_data *allocno_color_data_t;
159 /* Container for storing allocno data concerning coloring. */
160 static allocno_color_data_t allocno_color_data;
162 /* Macro to access the data concerning coloring. */
163 #define ALLOCNO_COLOR_DATA(a) ((allocno_color_data_t) ALLOCNO_ADD_DATA (a))
165 /* Used for finding allocno colorability to exclude repeated allocno
166 processing and for updating preferencing to exclude repeated
167 allocno processing during assignment. */
168 static int curr_allocno_process;
170 /* This file contains code for regional graph coloring, spill/restore
171 code placement optimization, and code helping the reload pass to do
172 a better job. */
174 /* Bitmap of allocnos which should be colored. */
175 static bitmap coloring_allocno_bitmap;
177 /* Bitmap of allocnos which should be taken into account during
178 coloring. In general case it contains allocnos from
179 coloring_allocno_bitmap plus other already colored conflicting
180 allocnos. */
181 static bitmap consideration_allocno_bitmap;
183 /* All allocnos sorted according their priorities. */
184 static ira_allocno_t *sorted_allocnos;
186 /* Vec representing the stack of allocnos used during coloring. */
187 static vec<ira_allocno_t> allocno_stack_vec;
189 /* Helper for qsort comparison callbacks - return a positive integer if
190 X > Y, or a negative value otherwise. Use a conditional expression
191 instead of a difference computation to insulate from possible overflow
192 issues, e.g. X - Y < 0 for some X > 0 and Y < 0. */
193 #define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
197 /* Definition of vector of allocno hard registers. */
199 /* Vector of unique allocno hard registers. */
200 static vec<allocno_hard_regs_t> allocno_hard_regs_vec;
202 struct allocno_hard_regs_hasher : typed_noop_remove <allocno_hard_regs>
204 typedef allocno_hard_regs value_type;
205 typedef allocno_hard_regs compare_type;
206 static inline hashval_t hash (const value_type *);
207 static inline bool equal (const value_type *, const compare_type *);
210 /* Returns hash value for allocno hard registers V. */
211 inline hashval_t
212 allocno_hard_regs_hasher::hash (const value_type *hv)
214 return iterative_hash (&hv->set, sizeof (HARD_REG_SET), 0);
217 /* Compares allocno hard registers V1 and V2. */
218 inline bool
219 allocno_hard_regs_hasher::equal (const value_type *hv1, const compare_type *hv2)
221 return hard_reg_set_equal_p (hv1->set, hv2->set);
224 /* Hash table of unique allocno hard registers. */
225 static hash_table <allocno_hard_regs_hasher> allocno_hard_regs_htab;
227 /* Return allocno hard registers in the hash table equal to HV. */
228 static allocno_hard_regs_t
229 find_hard_regs (allocno_hard_regs_t hv)
231 return allocno_hard_regs_htab.find (hv);
234 /* Insert allocno hard registers HV in the hash table (if it is not
235 there yet) and return the value which in the table. */
236 static allocno_hard_regs_t
237 insert_hard_regs (allocno_hard_regs_t hv)
239 allocno_hard_regs **slot = allocno_hard_regs_htab.find_slot (hv, INSERT);
241 if (*slot == NULL)
242 *slot = hv;
243 return *slot;
246 /* Initialize data concerning allocno hard registers. */
247 static void
248 init_allocno_hard_regs (void)
250 allocno_hard_regs_vec.create (200);
251 allocno_hard_regs_htab.create (200);
254 /* Add (or update info about) allocno hard registers with SET and
255 COST. */
256 static allocno_hard_regs_t
257 add_allocno_hard_regs (HARD_REG_SET set, HOST_WIDEST_INT cost)
259 struct allocno_hard_regs temp;
260 allocno_hard_regs_t hv;
262 gcc_assert (! hard_reg_set_empty_p (set));
263 COPY_HARD_REG_SET (temp.set, set);
264 if ((hv = find_hard_regs (&temp)) != NULL)
265 hv->cost += cost;
266 else
268 hv = ((struct allocno_hard_regs *)
269 ira_allocate (sizeof (struct allocno_hard_regs)));
270 COPY_HARD_REG_SET (hv->set, set);
271 hv->cost = cost;
272 allocno_hard_regs_vec.safe_push (hv);
273 insert_hard_regs (hv);
275 return hv;
278 /* Finalize data concerning allocno hard registers. */
279 static void
280 finish_allocno_hard_regs (void)
282 int i;
283 allocno_hard_regs_t hv;
285 for (i = 0;
286 allocno_hard_regs_vec.iterate (i, &hv);
287 i++)
288 ira_free (hv);
289 allocno_hard_regs_htab.dispose ();
290 allocno_hard_regs_vec.release ();
293 /* Sort hard regs according to their frequency of usage. */
294 static int
295 allocno_hard_regs_compare (const void *v1p, const void *v2p)
297 allocno_hard_regs_t hv1 = *(const allocno_hard_regs_t *) v1p;
298 allocno_hard_regs_t hv2 = *(const allocno_hard_regs_t *) v2p;
300 if (hv2->cost > hv1->cost)
301 return 1;
302 else if (hv2->cost < hv1->cost)
303 return -1;
304 else
305 return 0;
310 /* Used for finding a common ancestor of two allocno hard registers
311 nodes in the forest. We use the current value of
312 'node_check_tick' to mark all nodes from one node to the top and
313 then walking up from another node until we find a marked node.
315 It is also used to figure out allocno colorability as a mark that
316 we already reset value of member 'conflict_size' for the forest
317 node corresponding to the processed allocno. */
318 static int node_check_tick;
320 /* Roots of the forest containing hard register sets can be assigned
321 to allocnos. */
322 static allocno_hard_regs_node_t hard_regs_roots;
324 /* Definition of vector of allocno hard register nodes. */
326 /* Vector used to create the forest. */
327 static vec<allocno_hard_regs_node_t> hard_regs_node_vec;
329 /* Create and return allocno hard registers node containing allocno
330 hard registers HV. */
331 static allocno_hard_regs_node_t
332 create_new_allocno_hard_regs_node (allocno_hard_regs_t hv)
334 allocno_hard_regs_node_t new_node;
336 new_node = ((struct allocno_hard_regs_node *)
337 ira_allocate (sizeof (struct allocno_hard_regs_node)));
338 new_node->check = 0;
339 new_node->hard_regs = hv;
340 new_node->hard_regs_num = hard_reg_set_size (hv->set);
341 new_node->first = NULL;
342 new_node->used_p = false;
343 return new_node;
346 /* Add allocno hard registers node NEW_NODE to the forest on its level
347 given by ROOTS. */
348 static void
349 add_new_allocno_hard_regs_node_to_forest (allocno_hard_regs_node_t *roots,
350 allocno_hard_regs_node_t new_node)
352 new_node->next = *roots;
353 if (new_node->next != NULL)
354 new_node->next->prev = new_node;
355 new_node->prev = NULL;
356 *roots = new_node;
359 /* Add allocno hard registers HV (or its best approximation if it is
360 not possible) to the forest on its level given by ROOTS. */
361 static void
362 add_allocno_hard_regs_to_forest (allocno_hard_regs_node_t *roots,
363 allocno_hard_regs_t hv)
365 unsigned int i, start;
366 allocno_hard_regs_node_t node, prev, new_node;
367 HARD_REG_SET temp_set;
368 allocno_hard_regs_t hv2;
370 start = hard_regs_node_vec.length ();
371 for (node = *roots; node != NULL; node = node->next)
373 if (hard_reg_set_equal_p (hv->set, node->hard_regs->set))
374 return;
375 if (hard_reg_set_subset_p (hv->set, node->hard_regs->set))
377 add_allocno_hard_regs_to_forest (&node->first, hv);
378 return;
380 if (hard_reg_set_subset_p (node->hard_regs->set, hv->set))
381 hard_regs_node_vec.safe_push (node);
382 else if (hard_reg_set_intersect_p (hv->set, node->hard_regs->set))
384 COPY_HARD_REG_SET (temp_set, hv->set);
385 AND_HARD_REG_SET (temp_set, node->hard_regs->set);
386 hv2 = add_allocno_hard_regs (temp_set, hv->cost);
387 add_allocno_hard_regs_to_forest (&node->first, hv2);
390 if (hard_regs_node_vec.length ()
391 > start + 1)
393 /* Create a new node which contains nodes in hard_regs_node_vec. */
394 CLEAR_HARD_REG_SET (temp_set);
395 for (i = start;
396 i < hard_regs_node_vec.length ();
397 i++)
399 node = hard_regs_node_vec[i];
400 IOR_HARD_REG_SET (temp_set, node->hard_regs->set);
402 hv = add_allocno_hard_regs (temp_set, hv->cost);
403 new_node = create_new_allocno_hard_regs_node (hv);
404 prev = NULL;
405 for (i = start;
406 i < hard_regs_node_vec.length ();
407 i++)
409 node = hard_regs_node_vec[i];
410 if (node->prev == NULL)
411 *roots = node->next;
412 else
413 node->prev->next = node->next;
414 if (node->next != NULL)
415 node->next->prev = node->prev;
416 if (prev == NULL)
417 new_node->first = node;
418 else
419 prev->next = node;
420 node->prev = prev;
421 node->next = NULL;
422 prev = node;
424 add_new_allocno_hard_regs_node_to_forest (roots, new_node);
426 hard_regs_node_vec.truncate (start);
429 /* Add allocno hard registers nodes starting with the forest level
430 given by FIRST which contains biggest set inside SET. */
431 static void
432 collect_allocno_hard_regs_cover (allocno_hard_regs_node_t first,
433 HARD_REG_SET set)
435 allocno_hard_regs_node_t node;
437 ira_assert (first != NULL);
438 for (node = first; node != NULL; node = node->next)
439 if (hard_reg_set_subset_p (node->hard_regs->set, set))
440 hard_regs_node_vec.safe_push (node);
441 else if (hard_reg_set_intersect_p (set, node->hard_regs->set))
442 collect_allocno_hard_regs_cover (node->first, set);
445 /* Set up field parent as PARENT in all allocno hard registers nodes
446 in forest given by FIRST. */
447 static void
448 setup_allocno_hard_regs_nodes_parent (allocno_hard_regs_node_t first,
449 allocno_hard_regs_node_t parent)
451 allocno_hard_regs_node_t node;
453 for (node = first; node != NULL; node = node->next)
455 node->parent = parent;
456 setup_allocno_hard_regs_nodes_parent (node->first, node);
460 /* Return allocno hard registers node which is a first common ancestor
461 node of FIRST and SECOND in the forest. */
462 static allocno_hard_regs_node_t
463 first_common_ancestor_node (allocno_hard_regs_node_t first,
464 allocno_hard_regs_node_t second)
466 allocno_hard_regs_node_t node;
468 node_check_tick++;
469 for (node = first; node != NULL; node = node->parent)
470 node->check = node_check_tick;
471 for (node = second; node != NULL; node = node->parent)
472 if (node->check == node_check_tick)
473 return node;
474 return first_common_ancestor_node (second, first);
477 /* Print hard reg set SET to F. */
478 static void
479 print_hard_reg_set (FILE *f, HARD_REG_SET set, bool new_line_p)
481 int i, start;
483 for (start = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
485 if (TEST_HARD_REG_BIT (set, i))
487 if (i == 0 || ! TEST_HARD_REG_BIT (set, i - 1))
488 start = i;
490 if (start >= 0
491 && (i == FIRST_PSEUDO_REGISTER - 1 || ! TEST_HARD_REG_BIT (set, i)))
493 if (start == i - 1)
494 fprintf (f, " %d", start);
495 else if (start == i - 2)
496 fprintf (f, " %d %d", start, start + 1);
497 else
498 fprintf (f, " %d-%d", start, i - 1);
499 start = -1;
502 if (new_line_p)
503 fprintf (f, "\n");
506 /* Print allocno hard register subforest given by ROOTS and its LEVEL
507 to F. */
508 static void
509 print_hard_regs_subforest (FILE *f, allocno_hard_regs_node_t roots,
510 int level)
512 int i;
513 allocno_hard_regs_node_t node;
515 for (node = roots; node != NULL; node = node->next)
517 fprintf (f, " ");
518 for (i = 0; i < level * 2; i++)
519 fprintf (f, " ");
520 fprintf (f, "%d:(", node->preorder_num);
521 print_hard_reg_set (f, node->hard_regs->set, false);
522 fprintf (f, ")@" HOST_WIDEST_INT_PRINT_DEC "\n", node->hard_regs->cost);
523 print_hard_regs_subforest (f, node->first, level + 1);
527 /* Print the allocno hard register forest to F. */
528 static void
529 print_hard_regs_forest (FILE *f)
531 fprintf (f, " Hard reg set forest:\n");
532 print_hard_regs_subforest (f, hard_regs_roots, 1);
535 /* Print the allocno hard register forest to stderr. */
536 void
537 ira_debug_hard_regs_forest (void)
539 print_hard_regs_forest (stderr);
542 /* Remove unused allocno hard registers nodes from forest given by its
543 *ROOTS. */
544 static void
545 remove_unused_allocno_hard_regs_nodes (allocno_hard_regs_node_t *roots)
547 allocno_hard_regs_node_t node, prev, next, last;
549 for (prev = NULL, node = *roots; node != NULL; node = next)
551 next = node->next;
552 if (node->used_p)
554 remove_unused_allocno_hard_regs_nodes (&node->first);
555 prev = node;
557 else
559 for (last = node->first;
560 last != NULL && last->next != NULL;
561 last = last->next)
563 if (last != NULL)
565 if (prev == NULL)
566 *roots = node->first;
567 else
568 prev->next = node->first;
569 if (next != NULL)
570 next->prev = last;
571 last->next = next;
572 next = node->first;
574 else
576 if (prev == NULL)
577 *roots = next;
578 else
579 prev->next = next;
580 if (next != NULL)
581 next->prev = prev;
583 ira_free (node);
588 /* Set up fields preorder_num starting with START_NUM in all allocno
589 hard registers nodes in forest given by FIRST. Return biggest set
590 PREORDER_NUM increased by 1. */
591 static int
592 enumerate_allocno_hard_regs_nodes (allocno_hard_regs_node_t first,
593 allocno_hard_regs_node_t parent,
594 int start_num)
596 allocno_hard_regs_node_t node;
598 for (node = first; node != NULL; node = node->next)
600 node->preorder_num = start_num++;
601 node->parent = parent;
602 start_num = enumerate_allocno_hard_regs_nodes (node->first, node,
603 start_num);
605 return start_num;
608 /* Number of allocno hard registers nodes in the forest. */
609 static int allocno_hard_regs_nodes_num;
611 /* Table preorder number of allocno hard registers node in the forest
612 -> the allocno hard registers node. */
613 static allocno_hard_regs_node_t *allocno_hard_regs_nodes;
615 /* See below. */
616 typedef struct allocno_hard_regs_subnode *allocno_hard_regs_subnode_t;
618 /* The structure is used to describes all subnodes (not only immediate
619 ones) in the mentioned above tree for given allocno hard register
620 node. The usage of such data accelerates calculation of
621 colorability of given allocno. */
622 struct allocno_hard_regs_subnode
624 /* The conflict size of conflicting allocnos whose hard register
625 sets are equal sets (plus supersets if given node is given
626 allocno hard registers node) of one in the given node. */
627 int left_conflict_size;
628 /* The summary conflict size of conflicting allocnos whose hard
629 register sets are strict subsets of one in the given node.
630 Overall conflict size is
631 left_conflict_subnodes_size
632 + MIN (max_node_impact - left_conflict_subnodes_size,
633 left_conflict_size)
635 short left_conflict_subnodes_size;
636 short max_node_impact;
639 /* Container for hard regs subnodes of all allocnos. */
640 static allocno_hard_regs_subnode_t allocno_hard_regs_subnodes;
642 /* Table (preorder number of allocno hard registers node in the
643 forest, preorder number of allocno hard registers subnode) -> index
644 of the subnode relative to the node. -1 if it is not a
645 subnode. */
646 static int *allocno_hard_regs_subnode_index;
648 /* Setup arrays ALLOCNO_HARD_REGS_NODES and
649 ALLOCNO_HARD_REGS_SUBNODE_INDEX. */
650 static void
651 setup_allocno_hard_regs_subnode_index (allocno_hard_regs_node_t first)
653 allocno_hard_regs_node_t node, parent;
654 int index;
656 for (node = first; node != NULL; node = node->next)
658 allocno_hard_regs_nodes[node->preorder_num] = node;
659 for (parent = node; parent != NULL; parent = parent->parent)
661 index = parent->preorder_num * allocno_hard_regs_nodes_num;
662 allocno_hard_regs_subnode_index[index + node->preorder_num]
663 = node->preorder_num - parent->preorder_num;
665 setup_allocno_hard_regs_subnode_index (node->first);
669 /* Count all allocno hard registers nodes in tree ROOT. */
670 static int
671 get_allocno_hard_regs_subnodes_num (allocno_hard_regs_node_t root)
673 int len = 1;
675 for (root = root->first; root != NULL; root = root->next)
676 len += get_allocno_hard_regs_subnodes_num (root);
677 return len;
680 /* Build the forest of allocno hard registers nodes and assign each
681 allocno a node from the forest. */
682 static void
683 form_allocno_hard_regs_nodes_forest (void)
685 unsigned int i, j, size, len;
686 int start;
687 ira_allocno_t a;
688 allocno_hard_regs_t hv;
689 bitmap_iterator bi;
690 HARD_REG_SET temp;
691 allocno_hard_regs_node_t node, allocno_hard_regs_node;
692 allocno_color_data_t allocno_data;
694 node_check_tick = 0;
695 init_allocno_hard_regs ();
696 hard_regs_roots = NULL;
697 hard_regs_node_vec.create (100);
698 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
699 if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
701 CLEAR_HARD_REG_SET (temp);
702 SET_HARD_REG_BIT (temp, i);
703 hv = add_allocno_hard_regs (temp, 0);
704 node = create_new_allocno_hard_regs_node (hv);
705 add_new_allocno_hard_regs_node_to_forest (&hard_regs_roots, node);
707 start = allocno_hard_regs_vec.length ();
708 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
710 a = ira_allocnos[i];
711 allocno_data = ALLOCNO_COLOR_DATA (a);
713 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
714 continue;
715 hv = (add_allocno_hard_regs
716 (allocno_data->profitable_hard_regs,
717 ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a)));
719 SET_HARD_REG_SET (temp);
720 AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
721 add_allocno_hard_regs (temp, 0);
722 qsort (allocno_hard_regs_vec.address () + start,
723 allocno_hard_regs_vec.length () - start,
724 sizeof (allocno_hard_regs_t), allocno_hard_regs_compare);
725 for (i = start;
726 allocno_hard_regs_vec.iterate (i, &hv);
727 i++)
729 add_allocno_hard_regs_to_forest (&hard_regs_roots, hv);
730 ira_assert (hard_regs_node_vec.length () == 0);
732 /* We need to set up parent fields for right work of
733 first_common_ancestor_node. */
734 setup_allocno_hard_regs_nodes_parent (hard_regs_roots, NULL);
735 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
737 a = ira_allocnos[i];
738 allocno_data = ALLOCNO_COLOR_DATA (a);
739 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
740 continue;
741 hard_regs_node_vec.truncate (0);
742 collect_allocno_hard_regs_cover (hard_regs_roots,
743 allocno_data->profitable_hard_regs);
744 allocno_hard_regs_node = NULL;
745 for (j = 0; hard_regs_node_vec.iterate (j, &node); j++)
746 allocno_hard_regs_node
747 = (j == 0
748 ? node
749 : first_common_ancestor_node (node, allocno_hard_regs_node));
750 /* That is a temporary storage. */
751 allocno_hard_regs_node->used_p = true;
752 allocno_data->hard_regs_node = allocno_hard_regs_node;
754 ira_assert (hard_regs_roots->next == NULL);
755 hard_regs_roots->used_p = true;
756 remove_unused_allocno_hard_regs_nodes (&hard_regs_roots);
757 allocno_hard_regs_nodes_num
758 = enumerate_allocno_hard_regs_nodes (hard_regs_roots, NULL, 0);
759 allocno_hard_regs_nodes
760 = ((allocno_hard_regs_node_t *)
761 ira_allocate (allocno_hard_regs_nodes_num
762 * sizeof (allocno_hard_regs_node_t)));
763 size = allocno_hard_regs_nodes_num * allocno_hard_regs_nodes_num;
764 allocno_hard_regs_subnode_index
765 = (int *) ira_allocate (size * sizeof (int));
766 for (i = 0; i < size; i++)
767 allocno_hard_regs_subnode_index[i] = -1;
768 setup_allocno_hard_regs_subnode_index (hard_regs_roots);
769 start = 0;
770 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
772 a = ira_allocnos[i];
773 allocno_data = ALLOCNO_COLOR_DATA (a);
774 if (hard_reg_set_empty_p (allocno_data->profitable_hard_regs))
775 continue;
776 len = get_allocno_hard_regs_subnodes_num (allocno_data->hard_regs_node);
777 allocno_data->hard_regs_subnodes_start = start;
778 allocno_data->hard_regs_subnodes_num = len;
779 start += len;
781 allocno_hard_regs_subnodes
782 = ((allocno_hard_regs_subnode_t)
783 ira_allocate (sizeof (struct allocno_hard_regs_subnode) * start));
784 hard_regs_node_vec.release ();
787 /* Free tree of allocno hard registers nodes given by its ROOT. */
788 static void
789 finish_allocno_hard_regs_nodes_tree (allocno_hard_regs_node_t root)
791 allocno_hard_regs_node_t child, next;
793 for (child = root->first; child != NULL; child = next)
795 next = child->next;
796 finish_allocno_hard_regs_nodes_tree (child);
798 ira_free (root);
801 /* Finish work with the forest of allocno hard registers nodes. */
802 static void
803 finish_allocno_hard_regs_nodes_forest (void)
805 allocno_hard_regs_node_t node, next;
807 ira_free (allocno_hard_regs_subnodes);
808 for (node = hard_regs_roots; node != NULL; node = next)
810 next = node->next;
811 finish_allocno_hard_regs_nodes_tree (node);
813 ira_free (allocno_hard_regs_nodes);
814 ira_free (allocno_hard_regs_subnode_index);
815 finish_allocno_hard_regs ();
818 /* Set up left conflict sizes and left conflict subnodes sizes of hard
819 registers subnodes of allocno A. Return TRUE if allocno A is
820 trivially colorable. */
821 static bool
822 setup_left_conflict_sizes_p (ira_allocno_t a)
824 int i, k, nobj, start;
825 int conflict_size, left_conflict_subnodes_size, node_preorder_num;
826 allocno_color_data_t data;
827 HARD_REG_SET profitable_hard_regs;
828 allocno_hard_regs_subnode_t subnodes;
829 allocno_hard_regs_node_t node;
830 HARD_REG_SET node_set;
832 nobj = ALLOCNO_NUM_OBJECTS (a);
833 conflict_size = 0;
834 data = ALLOCNO_COLOR_DATA (a);
835 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
836 COPY_HARD_REG_SET (profitable_hard_regs, data->profitable_hard_regs);
837 node = data->hard_regs_node;
838 node_preorder_num = node->preorder_num;
839 COPY_HARD_REG_SET (node_set, node->hard_regs->set);
840 node_check_tick++;
841 for (k = 0; k < nobj; k++)
843 ira_object_t obj = ALLOCNO_OBJECT (a, k);
844 ira_object_t conflict_obj;
845 ira_object_conflict_iterator oci;
847 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
849 int size;
850 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
851 allocno_hard_regs_node_t conflict_node, temp_node;
852 HARD_REG_SET conflict_node_set;
853 allocno_color_data_t conflict_data;
855 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
856 if (! ALLOCNO_COLOR_DATA (conflict_a)->in_graph_p
857 || ! hard_reg_set_intersect_p (profitable_hard_regs,
858 conflict_data
859 ->profitable_hard_regs))
860 continue;
861 conflict_node = conflict_data->hard_regs_node;
862 COPY_HARD_REG_SET (conflict_node_set, conflict_node->hard_regs->set);
863 if (hard_reg_set_subset_p (node_set, conflict_node_set))
864 temp_node = node;
865 else
867 ira_assert (hard_reg_set_subset_p (conflict_node_set, node_set));
868 temp_node = conflict_node;
870 if (temp_node->check != node_check_tick)
872 temp_node->check = node_check_tick;
873 temp_node->conflict_size = 0;
875 size = (ira_reg_class_max_nregs
876 [ALLOCNO_CLASS (conflict_a)][ALLOCNO_MODE (conflict_a)]);
877 if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
878 /* We will deal with the subwords individually. */
879 size = 1;
880 temp_node->conflict_size += size;
883 for (i = 0; i < data->hard_regs_subnodes_num; i++)
885 allocno_hard_regs_node_t temp_node;
887 temp_node = allocno_hard_regs_nodes[i + node_preorder_num];
888 ira_assert (temp_node->preorder_num == i + node_preorder_num);
889 subnodes[i].left_conflict_size = (temp_node->check != node_check_tick
890 ? 0 : temp_node->conflict_size);
891 if (hard_reg_set_subset_p (temp_node->hard_regs->set,
892 profitable_hard_regs))
893 subnodes[i].max_node_impact = temp_node->hard_regs_num;
894 else
896 HARD_REG_SET temp_set;
897 int j, n, hard_regno;
898 enum reg_class aclass;
900 COPY_HARD_REG_SET (temp_set, temp_node->hard_regs->set);
901 AND_HARD_REG_SET (temp_set, profitable_hard_regs);
902 aclass = ALLOCNO_CLASS (a);
903 for (n = 0, j = ira_class_hard_regs_num[aclass] - 1; j >= 0; j--)
905 hard_regno = ira_class_hard_regs[aclass][j];
906 if (TEST_HARD_REG_BIT (temp_set, hard_regno))
907 n++;
909 subnodes[i].max_node_impact = n;
911 subnodes[i].left_conflict_subnodes_size = 0;
913 start = node_preorder_num * allocno_hard_regs_nodes_num;
914 for (i = data->hard_regs_subnodes_num - 1; i >= 0; i--)
916 int size, parent_i;
917 allocno_hard_regs_node_t parent;
919 size = (subnodes[i].left_conflict_subnodes_size
920 + MIN (subnodes[i].max_node_impact
921 - subnodes[i].left_conflict_subnodes_size,
922 subnodes[i].left_conflict_size));
923 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
924 if (parent == NULL)
925 continue;
926 parent_i
927 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
928 if (parent_i < 0)
929 continue;
930 subnodes[parent_i].left_conflict_subnodes_size += size;
932 left_conflict_subnodes_size = subnodes[0].left_conflict_subnodes_size;
933 conflict_size
934 += (left_conflict_subnodes_size
935 + MIN (subnodes[0].max_node_impact - left_conflict_subnodes_size,
936 subnodes[0].left_conflict_size));
937 conflict_size += ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
938 data->colorable_p = conflict_size <= data->available_regs_num;
939 return data->colorable_p;
942 /* Update left conflict sizes of hard registers subnodes of allocno A
943 after removing allocno REMOVED_A with SIZE from the conflict graph.
944 Return TRUE if A is trivially colorable. */
945 static bool
946 update_left_conflict_sizes_p (ira_allocno_t a,
947 ira_allocno_t removed_a, int size)
949 int i, conflict_size, before_conflict_size, diff, start;
950 int node_preorder_num, parent_i;
951 allocno_hard_regs_node_t node, removed_node, parent;
952 allocno_hard_regs_subnode_t subnodes;
953 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
955 ira_assert (! data->colorable_p);
956 node = data->hard_regs_node;
957 node_preorder_num = node->preorder_num;
958 removed_node = ALLOCNO_COLOR_DATA (removed_a)->hard_regs_node;
959 ira_assert (hard_reg_set_subset_p (removed_node->hard_regs->set,
960 node->hard_regs->set)
961 || hard_reg_set_subset_p (node->hard_regs->set,
962 removed_node->hard_regs->set));
963 start = node_preorder_num * allocno_hard_regs_nodes_num;
964 i = allocno_hard_regs_subnode_index[start + removed_node->preorder_num];
965 if (i < 0)
966 i = 0;
967 subnodes = allocno_hard_regs_subnodes + data->hard_regs_subnodes_start;
968 before_conflict_size
969 = (subnodes[i].left_conflict_subnodes_size
970 + MIN (subnodes[i].max_node_impact
971 - subnodes[i].left_conflict_subnodes_size,
972 subnodes[i].left_conflict_size));
973 subnodes[i].left_conflict_size -= size;
974 for (;;)
976 conflict_size
977 = (subnodes[i].left_conflict_subnodes_size
978 + MIN (subnodes[i].max_node_impact
979 - subnodes[i].left_conflict_subnodes_size,
980 subnodes[i].left_conflict_size));
981 if ((diff = before_conflict_size - conflict_size) == 0)
982 break;
983 ira_assert (conflict_size < before_conflict_size);
984 parent = allocno_hard_regs_nodes[i + node_preorder_num]->parent;
985 if (parent == NULL)
986 break;
987 parent_i
988 = allocno_hard_regs_subnode_index[start + parent->preorder_num];
989 if (parent_i < 0)
990 break;
991 i = parent_i;
992 before_conflict_size
993 = (subnodes[i].left_conflict_subnodes_size
994 + MIN (subnodes[i].max_node_impact
995 - subnodes[i].left_conflict_subnodes_size,
996 subnodes[i].left_conflict_size));
997 subnodes[i].left_conflict_subnodes_size -= diff;
999 if (i != 0
1000 || (conflict_size
1001 + ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
1002 > data->available_regs_num))
1003 return false;
1004 data->colorable_p = true;
1005 return true;
1008 /* Return true if allocno A has empty profitable hard regs. */
1009 static bool
1010 empty_profitable_hard_regs (ira_allocno_t a)
1012 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
1014 return hard_reg_set_empty_p (data->profitable_hard_regs);
1017 /* Set up profitable hard registers for each allocno being
1018 colored. */
1019 static void
1020 setup_profitable_hard_regs (void)
1022 unsigned int i;
1023 int j, k, nobj, hard_regno, nregs, class_size;
1024 ira_allocno_t a;
1025 bitmap_iterator bi;
1026 enum reg_class aclass;
1027 enum machine_mode mode;
1028 allocno_color_data_t data;
1030 /* Initial set up from allocno classes and explicitly conflicting
1031 hard regs. */
1032 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1034 a = ira_allocnos[i];
1035 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS)
1036 continue;
1037 data = ALLOCNO_COLOR_DATA (a);
1038 if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL
1039 && ALLOCNO_CLASS_COST (a) > ALLOCNO_MEMORY_COST (a))
1040 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1041 else
1043 mode = ALLOCNO_MODE (a);
1044 COPY_HARD_REG_SET (data->profitable_hard_regs,
1045 ira_useful_class_mode_regs[aclass][mode]);
1046 nobj = ALLOCNO_NUM_OBJECTS (a);
1047 for (k = 0; k < nobj; k++)
1049 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1051 AND_COMPL_HARD_REG_SET (data->profitable_hard_regs,
1052 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1056 /* Exclude hard regs already assigned for conflicting objects. */
1057 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, i, bi)
1059 a = ira_allocnos[i];
1060 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1061 || ! ALLOCNO_ASSIGNED_P (a)
1062 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
1063 continue;
1064 mode = ALLOCNO_MODE (a);
1065 nregs = hard_regno_nregs[hard_regno][mode];
1066 nobj = ALLOCNO_NUM_OBJECTS (a);
1067 for (k = 0; k < nobj; k++)
1069 ira_object_t obj = ALLOCNO_OBJECT (a, k);
1070 ira_object_t conflict_obj;
1071 ira_object_conflict_iterator oci;
1073 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1075 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1077 /* We can process the conflict allocno repeatedly with
1078 the same result. */
1079 if (nregs == nobj && nregs > 1)
1081 int num = OBJECT_SUBWORD (conflict_obj);
1083 if (REG_WORDS_BIG_ENDIAN)
1084 CLEAR_HARD_REG_BIT
1085 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1086 hard_regno + nobj - num - 1);
1087 else
1088 CLEAR_HARD_REG_BIT
1089 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1090 hard_regno + num);
1092 else
1093 AND_COMPL_HARD_REG_SET
1094 (ALLOCNO_COLOR_DATA (conflict_a)->profitable_hard_regs,
1095 ira_reg_mode_hard_regset[hard_regno][mode]);
1099 /* Exclude too costly hard regs. */
1100 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1102 int min_cost = INT_MAX;
1103 int *costs;
1105 a = ira_allocnos[i];
1106 if ((aclass = ALLOCNO_CLASS (a)) == NO_REGS
1107 || empty_profitable_hard_regs (a))
1108 continue;
1109 data = ALLOCNO_COLOR_DATA (a);
1110 mode = ALLOCNO_MODE (a);
1111 if ((costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a)) != NULL
1112 || (costs = ALLOCNO_HARD_REG_COSTS (a)) != NULL)
1114 class_size = ira_class_hard_regs_num[aclass];
1115 for (j = 0; j < class_size; j++)
1117 hard_regno = ira_class_hard_regs[aclass][j];
1118 if (! TEST_HARD_REG_BIT (data->profitable_hard_regs,
1119 hard_regno))
1120 continue;
1121 if (ALLOCNO_UPDATED_MEMORY_COST (a) < costs[j])
1122 CLEAR_HARD_REG_BIT (data->profitable_hard_regs,
1123 hard_regno);
1124 else if (min_cost > costs[j])
1125 min_cost = costs[j];
1128 else if (ALLOCNO_UPDATED_MEMORY_COST (a)
1129 < ALLOCNO_UPDATED_CLASS_COST (a))
1130 CLEAR_HARD_REG_SET (data->profitable_hard_regs);
1131 if (ALLOCNO_UPDATED_CLASS_COST (a) > min_cost)
1132 ALLOCNO_UPDATED_CLASS_COST (a) = min_cost;
/* This page contains functions used to choose hard registers for
   allocnos.  */

/* Pool for update cost records (see struct update_cost_record);
   allocated in init_update_cost_records, freed as a whole in
   finish_update_cost_records.  */
static alloc_pool update_cost_record_pool;
1144 /* Initiate update cost records. */
1145 static void
1146 init_update_cost_records (void)
1148 update_cost_record_pool
1149 = create_alloc_pool ("update cost records",
1150 sizeof (struct update_cost_record), 100);
1153 /* Return new update cost record with given params. */
1154 static struct update_cost_record *
1155 get_update_cost_record (int hard_regno, int divisor,
1156 struct update_cost_record *next)
1158 struct update_cost_record *record;
1160 record = (struct update_cost_record *) pool_alloc (update_cost_record_pool);
1161 record->hard_regno = hard_regno;
1162 record->divisor = divisor;
1163 record->next = next;
1164 return record;
1167 /* Free memory for all records in LIST. */
1168 static void
1169 free_update_cost_record_list (struct update_cost_record *list)
1171 struct update_cost_record *next;
1173 while (list != NULL)
1175 next = list->next;
1176 pool_free (update_cost_record_pool, list);
1177 list = next;
1181 /* Free memory allocated for all update cost records. */
1182 static void
1183 finish_update_cost_records (void)
1185 free_alloc_pool (update_cost_record_pool);
1188 /* Array whose element value is TRUE if the corresponding hard
1189 register was already allocated for an allocno. */
1190 static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
1192 /* Describes one element in a queue of allocnos whose costs need to be
1193 updated. Each allocno in the queue is known to have an allocno
1194 class. */
1195 struct update_cost_queue_elem
1197 /* This element is in the queue iff CHECK == update_cost_check. */
1198 int check;
1200 /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
1201 connecting this allocno to the one being allocated. */
1202 int divisor;
1204 /* Allocno from which we are chaning costs of connected allocnos.
1205 It is used not go back in graph of allocnos connected by
1206 copies. */
1207 ira_allocno_t from;
1209 /* The next allocno in the queue, or null if this is the last element. */
1210 ira_allocno_t next;
1213 /* The first element in a queue of allocnos whose copy costs need to be
1214 updated. Null if the queue is empty. */
1215 static ira_allocno_t update_cost_queue;
1217 /* The last element in the queue described by update_cost_queue.
1218 Not valid if update_cost_queue is null. */
1219 static struct update_cost_queue_elem *update_cost_queue_tail;
1221 /* A pool of elements in the queue described by update_cost_queue.
1222 Elements are indexed by ALLOCNO_NUM. */
1223 static struct update_cost_queue_elem *update_cost_queue_elems;
1225 /* The current value of update_costs_from_copies call count. */
1226 static int update_cost_check;
1228 /* Allocate and initialize data necessary for function
1229 update_costs_from_copies. */
1230 static void
1231 initiate_cost_update (void)
1233 size_t size;
1235 size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
1236 update_cost_queue_elems
1237 = (struct update_cost_queue_elem *) ira_allocate (size);
1238 memset (update_cost_queue_elems, 0, size);
1239 update_cost_check = 0;
1240 init_update_cost_records ();
1243 /* Deallocate data used by function update_costs_from_copies. */
1244 static void
1245 finish_cost_update (void)
1247 ira_free (update_cost_queue_elems);
1248 finish_update_cost_records ();
/* When we traverse allocnos to update hard register costs, the cost
   divisor will be multiplied by the following macro value for each
   hop from given allocno to directly connected allocnos.  Thus the
   influence of a copy decays geometrically with graph distance.  */
#define COST_HOP_DIVISOR 4
1256 /* Start a new cost-updating pass. */
1257 static void
1258 start_update_cost (void)
1260 update_cost_check++;
1261 update_cost_queue = NULL;
1264 /* Add (ALLOCNO, FROM, DIVISOR) to the end of update_cost_queue, unless
1265 ALLOCNO is already in the queue, or has NO_REGS class. */
1266 static inline void
1267 queue_update_cost (ira_allocno_t allocno, ira_allocno_t from, int divisor)
1269 struct update_cost_queue_elem *elem;
1271 elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
1272 if (elem->check != update_cost_check
1273 && ALLOCNO_CLASS (allocno) != NO_REGS)
1275 elem->check = update_cost_check;
1276 elem->from = from;
1277 elem->divisor = divisor;
1278 elem->next = NULL;
1279 if (update_cost_queue == NULL)
1280 update_cost_queue = allocno;
1281 else
1282 update_cost_queue_tail->next = allocno;
1283 update_cost_queue_tail = elem;
1287 /* Try to remove the first element from update_cost_queue. Return
1288 false if the queue was empty, otherwise make (*ALLOCNO, *FROM,
1289 *DIVISOR) describe the removed element. */
1290 static inline bool
1291 get_next_update_cost (ira_allocno_t *allocno, ira_allocno_t *from, int *divisor)
1293 struct update_cost_queue_elem *elem;
1295 if (update_cost_queue == NULL)
1296 return false;
1298 *allocno = update_cost_queue;
1299 elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
1300 *from = elem->from;
1301 *divisor = elem->divisor;
1302 update_cost_queue = elem->next;
1303 return true;
1306 /* Increase costs of HARD_REGNO by UPDATE_COST for ALLOCNO. Return
1307 true if we really modified the cost. */
1308 static bool
1309 update_allocno_cost (ira_allocno_t allocno, int hard_regno, int update_cost)
1311 int i;
1312 enum reg_class aclass = ALLOCNO_CLASS (allocno);
1314 i = ira_class_hard_reg_index[aclass][hard_regno];
1315 if (i < 0)
1316 return false;
1317 ira_allocate_and_set_or_copy_costs
1318 (&ALLOCNO_UPDATED_HARD_REG_COSTS (allocno), aclass,
1319 ALLOCNO_UPDATED_CLASS_COST (allocno),
1320 ALLOCNO_HARD_REG_COSTS (allocno));
1321 ira_allocate_and_set_or_copy_costs
1322 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno),
1323 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (allocno));
1324 ALLOCNO_UPDATED_HARD_REG_COSTS (allocno)[i] += update_cost;
1325 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno)[i] += update_cost;
1326 return true;
1329 /* Update (decrease if DECR_P) HARD_REGNO cost of allocnos connected
1330 by copies to ALLOCNO to increase chances to remove some copies as
1331 the result of subsequent assignment. Record cost updates if
1332 RECORD_P is true. */
1333 static void
1334 update_costs_from_allocno (ira_allocno_t allocno, int hard_regno,
1335 int divisor, bool decr_p, bool record_p)
1337 int cost, update_cost;
1338 enum machine_mode mode;
1339 enum reg_class rclass, aclass;
1340 ira_allocno_t another_allocno, from = NULL;
1341 ira_copy_t cp, next_cp;
1343 rclass = REGNO_REG_CLASS (hard_regno);
1346 mode = ALLOCNO_MODE (allocno);
1347 ira_init_register_move_cost_if_necessary (mode);
1348 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1350 if (cp->first == allocno)
1352 next_cp = cp->next_first_allocno_copy;
1353 another_allocno = cp->second;
1355 else if (cp->second == allocno)
1357 next_cp = cp->next_second_allocno_copy;
1358 another_allocno = cp->first;
1360 else
1361 gcc_unreachable ();
1363 if (another_allocno == from)
1364 continue;
1366 aclass = ALLOCNO_CLASS (another_allocno);
1367 if (! TEST_HARD_REG_BIT (reg_class_contents[aclass],
1368 hard_regno)
1369 || ALLOCNO_ASSIGNED_P (another_allocno))
1370 continue;
1372 cost = (cp->second == allocno
1373 ? ira_register_move_cost[mode][rclass][aclass]
1374 : ira_register_move_cost[mode][aclass][rclass]);
1375 if (decr_p)
1376 cost = -cost;
1378 update_cost = cp->freq * cost / divisor;
1379 if (update_cost == 0)
1380 continue;
1382 if (! update_allocno_cost (another_allocno, hard_regno, update_cost))
1383 continue;
1384 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
1385 if (record_p && ALLOCNO_COLOR_DATA (another_allocno) != NULL)
1386 ALLOCNO_COLOR_DATA (another_allocno)->update_cost_records
1387 = get_update_cost_record (hard_regno, divisor,
1388 ALLOCNO_COLOR_DATA (another_allocno)
1389 ->update_cost_records);
1392 while (get_next_update_cost (&allocno, &from, &divisor));
1395 /* Decrease preferred ALLOCNO hard register costs and costs of
1396 allocnos connected to ALLOCNO through copy. */
1397 static void
1398 update_costs_from_prefs (ira_allocno_t allocno)
1400 ira_pref_t pref;
1402 start_update_cost ();
1403 for (pref = ALLOCNO_PREFS (allocno); pref != NULL; pref = pref->next_pref)
1404 update_costs_from_allocno (allocno, pref->hard_regno,
1405 COST_HOP_DIVISOR, true, true);
1408 /* Update (decrease if DECR_P) the cost of allocnos connected to
1409 ALLOCNO through copies to increase chances to remove some copies as
1410 the result of subsequent assignment. ALLOCNO was just assigned to
1411 a hard register. Record cost updates if RECORD_P is true. */
1412 static void
1413 update_costs_from_copies (ira_allocno_t allocno, bool decr_p, bool record_p)
1415 int hard_regno;
1417 hard_regno = ALLOCNO_HARD_REGNO (allocno);
1418 ira_assert (hard_regno >= 0 && ALLOCNO_CLASS (allocno) != NO_REGS);
1419 start_update_cost ();
1420 update_costs_from_allocno (allocno, hard_regno, 1, decr_p, record_p);
1423 /* Restore costs of allocnos connected to ALLOCNO by copies as it was
1424 before updating costs of these allocnos from given allocno. This
1425 is a wise thing to do as if given allocno did not get an expected
1426 hard reg, using smaller cost of the hard reg for allocnos connected
1427 by copies to given allocno becomes actually misleading. Free all
1428 update cost records for ALLOCNO as we don't need them anymore. */
1429 static void
1430 restore_costs_from_copies (ira_allocno_t allocno)
1432 struct update_cost_record *records, *curr;
1434 if (ALLOCNO_COLOR_DATA (allocno) == NULL)
1435 return;
1436 records = ALLOCNO_COLOR_DATA (allocno)->update_cost_records;
1437 start_update_cost ();
1438 for (curr = records; curr != NULL; curr = curr->next)
1439 update_costs_from_allocno (allocno, curr->hard_regno,
1440 curr->divisor, true, false);
1441 free_update_cost_record_list (records);
1442 ALLOCNO_COLOR_DATA (allocno)->update_cost_records = NULL;
1445 /* This function updates COSTS (decrease if DECR_P) for hard_registers
1446 of ACLASS by conflict costs of the unassigned allocnos
1447 connected by copies with allocnos in update_cost_queue. This
1448 update increases chances to remove some copies. */
1449 static void
1450 update_conflict_hard_regno_costs (int *costs, enum reg_class aclass,
1451 bool decr_p)
1453 int i, cost, class_size, freq, mult, div, divisor;
1454 int index, hard_regno;
1455 int *conflict_costs;
1456 bool cont_p;
1457 enum reg_class another_aclass;
1458 ira_allocno_t allocno, another_allocno, from;
1459 ira_copy_t cp, next_cp;
1461 while (get_next_update_cost (&allocno, &from, &divisor))
1462 for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
1464 if (cp->first == allocno)
1466 next_cp = cp->next_first_allocno_copy;
1467 another_allocno = cp->second;
1469 else if (cp->second == allocno)
1471 next_cp = cp->next_second_allocno_copy;
1472 another_allocno = cp->first;
1474 else
1475 gcc_unreachable ();
1477 if (another_allocno == from)
1478 continue;
1480 another_aclass = ALLOCNO_CLASS (another_allocno);
1481 if (! ira_reg_classes_intersect_p[aclass][another_aclass]
1482 || ALLOCNO_ASSIGNED_P (another_allocno)
1483 || ALLOCNO_COLOR_DATA (another_allocno)->may_be_spilled_p)
1484 continue;
1485 class_size = ira_class_hard_regs_num[another_aclass];
1486 ira_allocate_and_copy_costs
1487 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
1488 another_aclass, ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
1489 conflict_costs
1490 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
1491 if (conflict_costs == NULL)
1492 cont_p = true;
1493 else
1495 mult = cp->freq;
1496 freq = ALLOCNO_FREQ (another_allocno);
1497 if (freq == 0)
1498 freq = 1;
1499 div = freq * divisor;
1500 cont_p = false;
1501 for (i = class_size - 1; i >= 0; i--)
1503 hard_regno = ira_class_hard_regs[another_aclass][i];
1504 ira_assert (hard_regno >= 0);
1505 index = ira_class_hard_reg_index[aclass][hard_regno];
1506 if (index < 0)
1507 continue;
1508 cost = conflict_costs [i] * mult / div;
1509 if (cost == 0)
1510 continue;
1511 cont_p = true;
1512 if (decr_p)
1513 cost = -cost;
1514 costs[index] += cost;
1517 /* Probably 5 hops will be enough. */
1518 if (cont_p
1519 && divisor <= (COST_HOP_DIVISOR
1520 * COST_HOP_DIVISOR
1521 * COST_HOP_DIVISOR
1522 * COST_HOP_DIVISOR))
1523 queue_update_cost (another_allocno, allocno, divisor * COST_HOP_DIVISOR);
1527 /* Set up conflicting (through CONFLICT_REGS) for each object of
1528 allocno A and the start allocno profitable regs (through
1529 START_PROFITABLE_REGS). Remember that the start profitable regs
1530 exclude hard regs which can not hold value of mode of allocno A.
1531 This covers mostly cases when multi-register value should be
1532 aligned. */
1533 static inline void
1534 get_conflict_and_start_profitable_regs (ira_allocno_t a, bool retry_p,
1535 HARD_REG_SET *conflict_regs,
1536 HARD_REG_SET *start_profitable_regs)
1538 int i, nwords;
1539 ira_object_t obj;
1541 nwords = ALLOCNO_NUM_OBJECTS (a);
1542 for (i = 0; i < nwords; i++)
1544 obj = ALLOCNO_OBJECT (a, i);
1545 COPY_HARD_REG_SET (conflict_regs[i],
1546 OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1548 if (retry_p)
1550 COPY_HARD_REG_SET (*start_profitable_regs,
1551 reg_class_contents[ALLOCNO_CLASS (a)]);
1552 AND_COMPL_HARD_REG_SET (*start_profitable_regs,
1553 ira_prohibited_class_mode_regs
1554 [ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
1556 else
1557 COPY_HARD_REG_SET (*start_profitable_regs,
1558 ALLOCNO_COLOR_DATA (a)->profitable_hard_regs);
1561 /* Return true if HARD_REGNO is ok for assigning to allocno A with
1562 PROFITABLE_REGS and whose objects have CONFLICT_REGS. */
1563 static inline bool
1564 check_hard_reg_p (ira_allocno_t a, int hard_regno,
1565 HARD_REG_SET *conflict_regs, HARD_REG_SET profitable_regs)
1567 int j, nwords, nregs;
1568 enum reg_class aclass;
1569 enum machine_mode mode;
1571 aclass = ALLOCNO_CLASS (a);
1572 mode = ALLOCNO_MODE (a);
1573 if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[aclass][mode],
1574 hard_regno))
1575 return false;
1576 /* Checking only profitable hard regs. */
1577 if (! TEST_HARD_REG_BIT (profitable_regs, hard_regno))
1578 return false;
1579 nregs = hard_regno_nregs[hard_regno][mode];
1580 nwords = ALLOCNO_NUM_OBJECTS (a);
1581 for (j = 0; j < nregs; j++)
1583 int k;
1584 int set_to_test_start = 0, set_to_test_end = nwords;
1586 if (nregs == nwords)
1588 if (REG_WORDS_BIG_ENDIAN)
1589 set_to_test_start = nwords - j - 1;
1590 else
1591 set_to_test_start = j;
1592 set_to_test_end = set_to_test_start + 1;
1594 for (k = set_to_test_start; k < set_to_test_end; k++)
1595 if (TEST_HARD_REG_BIT (conflict_regs[k], hard_regno + j))
1596 break;
1597 if (k != set_to_test_end)
1598 break;
1600 return j == nregs;
1602 #ifndef HONOR_REG_ALLOC_ORDER
1604 /* Return number of registers needed to be saved and restored at
1605 function prologue/epilogue if we allocate HARD_REGNO to hold value
1606 of MODE. */
1607 static int
1608 calculate_saved_nregs (int hard_regno, enum machine_mode mode)
1610 int i;
1611 int nregs = 0;
1613 ira_assert (hard_regno >= 0);
1614 for (i = hard_regno_nregs[hard_regno][mode] - 1; i >= 0; i--)
1615 if (!allocated_hardreg_p[hard_regno + i]
1616 && !TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + i)
1617 && !LOCAL_REGNO (hard_regno + i))
1618 nregs++;
1619 return nregs;
1621 #endif
1623 /* Choose a hard register for allocno A. If RETRY_P is TRUE, it means
1624 that the function called from function
1625 `ira_reassign_conflict_allocnos' and `allocno_reload_assign'. In
1626 this case some allocno data are not defined or updated and we
1627 should not touch these data. The function returns true if we
1628 managed to assign a hard register to the allocno.
1630 To assign a hard register, first of all we calculate all conflict
1631 hard registers which can come from conflicting allocnos with
1632 already assigned hard registers. After that we find first free
1633 hard register with the minimal cost. During hard register cost
1634 calculation we take conflict hard register costs into account to
1635 give a chance for conflicting allocnos to get a better hard
1636 register in the future.
1638 If the best hard register cost is bigger than cost of memory usage
1639 for the allocno, we don't assign a hard register to given allocno
1640 at all.
1642 If we assign a hard register to the allocno, we update costs of the
1643 hard register for allocnos connected by copies to improve a chance
1644 to coalesce insns represented by the copies when we assign hard
1645 registers to the allocnos connected by the copies. */
1646 static bool
1647 assign_hard_reg (ira_allocno_t a, bool retry_p)
1649 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
1650 int i, j, hard_regno, best_hard_regno, class_size;
1651 int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
1652 int *a_costs;
1653 enum reg_class aclass;
1654 enum machine_mode mode;
1655 static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
1656 #ifndef HONOR_REG_ALLOC_ORDER
1657 int saved_nregs;
1658 enum reg_class rclass;
1659 int add_cost;
1660 #endif
1661 #ifdef STACK_REGS
1662 bool no_stack_reg_p;
1663 #endif
1665 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1666 get_conflict_and_start_profitable_regs (a, retry_p,
1667 conflicting_regs,
1668 &profitable_hard_regs);
1669 aclass = ALLOCNO_CLASS (a);
1670 class_size = ira_class_hard_regs_num[aclass];
1671 best_hard_regno = -1;
1672 memset (full_costs, 0, sizeof (int) * class_size);
1673 mem_cost = 0;
1674 memset (costs, 0, sizeof (int) * class_size);
1675 memset (full_costs, 0, sizeof (int) * class_size);
1676 #ifdef STACK_REGS
1677 no_stack_reg_p = false;
1678 #endif
1679 if (! retry_p)
1680 start_update_cost ();
1681 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
1683 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1684 aclass, ALLOCNO_HARD_REG_COSTS (a));
1685 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
1686 #ifdef STACK_REGS
1687 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
1688 #endif
1689 cost = ALLOCNO_UPDATED_CLASS_COST (a);
1690 for (i = 0; i < class_size; i++)
1691 if (a_costs != NULL)
1693 costs[i] += a_costs[i];
1694 full_costs[i] += a_costs[i];
1696 else
1698 costs[i] += cost;
1699 full_costs[i] += cost;
1701 nwords = ALLOCNO_NUM_OBJECTS (a);
1702 curr_allocno_process++;
1703 for (word = 0; word < nwords; word++)
1705 ira_object_t conflict_obj;
1706 ira_object_t obj = ALLOCNO_OBJECT (a, word);
1707 ira_object_conflict_iterator oci;
1709 /* Take preferences of conflicting allocnos into account. */
1710 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
1712 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
1713 enum reg_class conflict_aclass;
1715 /* Reload can give another class so we need to check all
1716 allocnos. */
1717 if (!retry_p
1718 && (!bitmap_bit_p (consideration_allocno_bitmap,
1719 ALLOCNO_NUM (conflict_a))
1720 || ((!ALLOCNO_ASSIGNED_P (conflict_a)
1721 || ALLOCNO_HARD_REGNO (conflict_a) < 0)
1722 && !(hard_reg_set_intersect_p
1723 (profitable_hard_regs,
1724 ALLOCNO_COLOR_DATA
1725 (conflict_a)->profitable_hard_regs)))))
1726 continue;
1727 conflict_aclass = ALLOCNO_CLASS (conflict_a);
1728 ira_assert (ira_reg_classes_intersect_p
1729 [aclass][conflict_aclass]);
1730 if (ALLOCNO_ASSIGNED_P (conflict_a))
1732 hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
1733 if (hard_regno >= 0
1734 && (ira_hard_reg_set_intersection_p
1735 (hard_regno, ALLOCNO_MODE (conflict_a),
1736 reg_class_contents[aclass])))
1738 int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);
1739 int conflict_nregs;
1741 mode = ALLOCNO_MODE (conflict_a);
1742 conflict_nregs = hard_regno_nregs[hard_regno][mode];
1743 if (conflict_nregs == n_objects && conflict_nregs > 1)
1745 int num = OBJECT_SUBWORD (conflict_obj);
1747 if (REG_WORDS_BIG_ENDIAN)
1748 SET_HARD_REG_BIT (conflicting_regs[word],
1749 hard_regno + n_objects - num - 1);
1750 else
1751 SET_HARD_REG_BIT (conflicting_regs[word],
1752 hard_regno + num);
1754 else
1755 IOR_HARD_REG_SET
1756 (conflicting_regs[word],
1757 ira_reg_mode_hard_regset[hard_regno][mode]);
1758 if (hard_reg_set_subset_p (profitable_hard_regs,
1759 conflicting_regs[word]))
1760 goto fail;
1763 else if (! retry_p
1764 && ! ALLOCNO_COLOR_DATA (conflict_a)->may_be_spilled_p
1765 /* Don't process the conflict allocno twice. */
1766 && (ALLOCNO_COLOR_DATA (conflict_a)->last_process
1767 != curr_allocno_process))
1769 int k, *conflict_costs;
1771 ALLOCNO_COLOR_DATA (conflict_a)->last_process
1772 = curr_allocno_process;
1773 ira_allocate_and_copy_costs
1774 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
1775 conflict_aclass,
1776 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
1777 conflict_costs
1778 = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
1779 if (conflict_costs != NULL)
1780 for (j = class_size - 1; j >= 0; j--)
1782 hard_regno = ira_class_hard_regs[aclass][j];
1783 ira_assert (hard_regno >= 0);
1784 k = ira_class_hard_reg_index[conflict_aclass][hard_regno];
1785 if (k < 0)
1786 continue;
1787 full_costs[j] -= conflict_costs[k];
1789 queue_update_cost (conflict_a, NULL, COST_HOP_DIVISOR);
1794 if (! retry_p)
1795 /* Take into account preferences of allocnos connected by copies to
1796 the conflict allocnos. */
1797 update_conflict_hard_regno_costs (full_costs, aclass, true);
1799 /* Take preferences of allocnos connected by copies into
1800 account. */
1801 if (! retry_p)
1803 start_update_cost ();
1804 queue_update_cost (a, NULL, COST_HOP_DIVISOR);
1805 update_conflict_hard_regno_costs (full_costs, aclass, false);
1807 min_cost = min_full_cost = INT_MAX;
1808 /* We don't care about giving callee saved registers to allocnos no
1809 living through calls because call clobbered registers are
1810 allocated first (it is usual practice to put them first in
1811 REG_ALLOC_ORDER). */
1812 mode = ALLOCNO_MODE (a);
1813 for (i = 0; i < class_size; i++)
1815 hard_regno = ira_class_hard_regs[aclass][i];
1816 #ifdef STACK_REGS
1817 if (no_stack_reg_p
1818 && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
1819 continue;
1820 #endif
1821 if (! check_hard_reg_p (a, hard_regno,
1822 conflicting_regs, profitable_hard_regs))
1823 continue;
1824 cost = costs[i];
1825 full_cost = full_costs[i];
1826 #ifndef HONOR_REG_ALLOC_ORDER
1827 if ((saved_nregs = calculate_saved_nregs (hard_regno, mode)) != 0)
1828 /* We need to save/restore the hard register in
1829 epilogue/prologue. Therefore we increase the cost. */
1831 rclass = REGNO_REG_CLASS (hard_regno);
1832 add_cost = ((ira_memory_move_cost[mode][rclass][0]
1833 + ira_memory_move_cost[mode][rclass][1])
1834 * saved_nregs / hard_regno_nregs[hard_regno][mode] - 1);
1835 cost += add_cost;
1836 full_cost += add_cost;
1838 #endif
1839 if (min_cost > cost)
1840 min_cost = cost;
1841 if (min_full_cost > full_cost)
1843 min_full_cost = full_cost;
1844 best_hard_regno = hard_regno;
1845 ira_assert (hard_regno >= 0);
1848 if (min_full_cost > mem_cost)
1850 if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1851 fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
1852 mem_cost, min_full_cost);
1853 best_hard_regno = -1;
1855 fail:
1856 if (best_hard_regno >= 0)
1858 for (i = hard_regno_nregs[best_hard_regno][mode] - 1; i >= 0; i--)
1859 allocated_hardreg_p[best_hard_regno + i] = true;
1861 if (! retry_p)
1862 restore_costs_from_copies (a);
1863 ALLOCNO_HARD_REGNO (a) = best_hard_regno;
1864 ALLOCNO_ASSIGNED_P (a) = true;
1865 if (best_hard_regno >= 0)
1866 update_costs_from_copies (a, true, ! retry_p);
1867 ira_assert (ALLOCNO_CLASS (a) == aclass);
1868 /* We don't need updated costs anymore: */
1869 ira_free_allocno_updated_costs (a);
1870 return best_hard_regno >= 0;
/* Scratch array used by form_threads_from_copies to sort copies by
   execution frequency.  */
static ira_copy_t *sorted_copies;
1878 /* Return TRUE if live ranges of allocnos A1 and A2 intersect. It is
1879 used to find a conflict for new allocnos or allocnos with the
1880 different allocno classes. */
1881 static bool
1882 allocnos_conflict_by_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
1884 rtx reg1, reg2;
1885 int i, j;
1886 int n1 = ALLOCNO_NUM_OBJECTS (a1);
1887 int n2 = ALLOCNO_NUM_OBJECTS (a2);
1889 if (a1 == a2)
1890 return false;
1891 reg1 = regno_reg_rtx[ALLOCNO_REGNO (a1)];
1892 reg2 = regno_reg_rtx[ALLOCNO_REGNO (a2)];
1893 if (reg1 != NULL && reg2 != NULL
1894 && ORIGINAL_REGNO (reg1) == ORIGINAL_REGNO (reg2))
1895 return false;
1897 for (i = 0; i < n1; i++)
1899 ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
1901 for (j = 0; j < n2; j++)
1903 ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
1905 if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
1906 OBJECT_LIVE_RANGES (c2)))
1907 return true;
1910 return false;
1913 /* The function is used to sort copies according to their execution
1914 frequencies. */
1915 static int
1916 copy_freq_compare_func (const void *v1p, const void *v2p)
1918 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
1919 int pri1, pri2;
1921 pri1 = cp1->freq;
1922 pri2 = cp2->freq;
1923 if (pri2 - pri1)
1924 return pri2 - pri1;
1926 /* If freqencies are equal, sort by copies, so that the results of
1927 qsort leave nothing to chance. */
1928 return cp1->num - cp2->num;
1933 /* Return true if any allocno from thread of A1 conflicts with any
1934 allocno from thread A2. */
1935 static bool
1936 allocno_thread_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
1938 ira_allocno_t a, conflict_a;
1940 for (a = ALLOCNO_COLOR_DATA (a2)->next_thread_allocno;;
1941 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
1943 for (conflict_a = ALLOCNO_COLOR_DATA (a1)->next_thread_allocno;;
1944 conflict_a = ALLOCNO_COLOR_DATA (conflict_a)->next_thread_allocno)
1946 if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
1947 return true;
1948 if (conflict_a == a1)
1949 break;
1951 if (a == a2)
1952 break;
1954 return false;
1957 /* Merge two threads given correspondingly by their first allocnos T1
1958 and T2 (more accurately merging T2 into T1). */
1959 static void
1960 merge_threads (ira_allocno_t t1, ira_allocno_t t2)
1962 ira_allocno_t a, next, last;
1964 gcc_assert (t1 != t2
1965 && ALLOCNO_COLOR_DATA (t1)->first_thread_allocno == t1
1966 && ALLOCNO_COLOR_DATA (t2)->first_thread_allocno == t2);
1967 for (last = t2, a = ALLOCNO_COLOR_DATA (t2)->next_thread_allocno;;
1968 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
1970 ALLOCNO_COLOR_DATA (a)->first_thread_allocno = t1;
1971 if (a == t2)
1972 break;
1973 last = a;
1975 next = ALLOCNO_COLOR_DATA (t1)->next_thread_allocno;
1976 ALLOCNO_COLOR_DATA (t1)->next_thread_allocno = t2;
1977 ALLOCNO_COLOR_DATA (last)->next_thread_allocno = next;
1978 ALLOCNO_COLOR_DATA (t1)->thread_freq += ALLOCNO_COLOR_DATA (t2)->thread_freq;
1981 /* Create threads by processing CP_NUM copies from SORTED_COPIES.  We
1982 process the most expensive copies first.  */
1983 static void
1984 form_threads_from_copies (int cp_num)
1986 ira_allocno_t a, thread1, thread2;
1987 ira_copy_t cp;
1988 int i, n;
1990 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
1991 /* Form threads processing copies, most frequently executed
1992 first.  */
1993 for (; cp_num != 0;)
1995 for (i = 0; i < cp_num; i++)
1997 cp = sorted_copies[i];
1998 thread1 = ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno;
1999 thread2 = ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno;
2000 if (thread1 == thread2)
2001 continue;
/* Only merge threads whose members do not conflict; after a merge we
   restart, refiltering the remaining copies below.  */
2002 if (! allocno_thread_conflict_p (thread1, thread2))
2004 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2005 fprintf
2006 (ira_dump_file,
2007 "      Forming thread by copy %d:a%dr%d-a%dr%d (freq=%d):\n",
2008 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
2009 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
2010 cp->freq);
2011 merge_threads (thread1, thread2);
2012 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2014 thread1 = ALLOCNO_COLOR_DATA (thread1)->first_thread_allocno;
2015 fprintf (ira_dump_file, "        Result (freq=%d): a%dr%d(%d)",
2016 ALLOCNO_COLOR_DATA (thread1)->thread_freq,
2017 ALLOCNO_NUM (thread1), ALLOCNO_REGNO (thread1),
2018 ALLOCNO_FREQ (thread1));
2019 for (a = ALLOCNO_COLOR_DATA (thread1)->next_thread_allocno;
2020 a != thread1;
2021 a = ALLOCNO_COLOR_DATA (a)->next_thread_allocno)
2022 fprintf (ira_dump_file, " a%dr%d(%d)",
2023 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2024 ALLOCNO_FREQ (a));
2025 fprintf (ira_dump_file, "\n");
2027 i++;
2028 break;
2031 /* Collect the rest of copies.  */
2032 for (n = 0; i < cp_num; i++)
2034 cp = sorted_copies[i];
/* Drop copies whose endpoints already belong to the same thread.  */
2035 if (ALLOCNO_COLOR_DATA (cp->first)->first_thread_allocno
2036 != ALLOCNO_COLOR_DATA (cp->second)->first_thread_allocno)
2037 sorted_copies[n++] = cp;
2039 cp_num = n;
2043 /* Create threads by processing copies of all allocnos from BUCKET.  We
2044 process the most expensive copies first.  */
2045 static void
2046 form_threads_from_bucket (ira_allocno_t bucket)
2048 ira_allocno_t a;
2049 ira_copy_t cp, next_cp;
2050 int cp_num = 0;
/* Gather every copy whose first endpoint is a bucket member into
   SORTED_COPIES (taking only cp->first avoids duplicates).  */
2052 for (a = bucket; a != NULL; a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2054 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2056 if (cp->first == a)
2058 next_cp = cp->next_first_allocno_copy;
2059 sorted_copies[cp_num++] = cp;
2061 else if (cp->second == a)
2062 next_cp = cp->next_second_allocno_copy;
2063 else
2064 gcc_unreachable ();
2067 form_threads_from_copies (cp_num);
2070 /* Create threads by processing copies of colorable allocno A.  We
2071 process most expensive copies first.  */
2072 static void
2073 form_threads_from_colorable_allocno (ira_allocno_t a)
2075 ira_allocno_t another_a;
2076 ira_copy_t cp, next_cp;
2077 int cp_num = 0;
2079 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2081 if (cp->first == a)
2083 next_cp = cp->next_first_allocno_copy;
2084 another_a = cp->second;
2086 else if (cp->second == a)
2088 next_cp = cp->next_second_allocno_copy;
2089 another_a = cp->first;
2091 else
2092 gcc_unreachable ();
/* Consider only copies whose other side has already left the conflict
   graph without a chance of spilling, or is itself colorable.  */
2093 if ((! ALLOCNO_COLOR_DATA (another_a)->in_graph_p
2094 && !ALLOCNO_COLOR_DATA (another_a)->may_be_spilled_p)
2095 || ALLOCNO_COLOR_DATA (another_a)->colorable_p)
2096 sorted_copies[cp_num++] = cp;
2098 form_threads_from_copies (cp_num);
2101 /* Form initial threads which contain only one allocno.  */
2102 static void
2103 init_allocno_threads (void)
2105 ira_allocno_t a;
2106 unsigned int j;
2107 bitmap_iterator bi;
2109 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2111 a = ira_allocnos[j];
2112 /* Set up initial thread data: each allocno is a singleton circular
2113 thread whose head and successor are the allocno itself.  */
2113 ALLOCNO_COLOR_DATA (a)->first_thread_allocno
2114 = ALLOCNO_COLOR_DATA (a)->next_thread_allocno = a;
2115 ALLOCNO_COLOR_DATA (a)->thread_freq = ALLOCNO_FREQ (a);
2121 /* This page contains the allocator based on the Chaitin-Briggs algorithm.  */
2123 /* Bucket of allocnos that can be colored currently without spilling.  */
2124 static ira_allocno_t colorable_allocno_bucket;
2126 /* Bucket of allocnos that might be not colored currently without
2127 spilling.  */
2128 static ira_allocno_t uncolorable_allocno_bucket;
2130 /* The current number of allocnos in the uncolorable_bucket.  */
2131 static int uncolorable_allocnos_num;
2133 /* Return the current spill priority of allocno A.  The less the
2134 number, the more preferable the allocno for spilling.  */
2135 static inline int
2136 allocno_spill_priority (ira_allocno_t a)
2138 allocno_color_data_t data = ALLOCNO_COLOR_DATA (a);
/* data->temp holds the precomputed spill cost (see
   push_allocnos_to_stack); divide by register pressure impact.  The
   "+ 1" guards against division by zero.  */
2140 return (data->temp
2141 / (ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2142 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]
2143 + 1));
2146 /* Add allocno A to bucket *BUCKET_PTR.  A should be not in a bucket
2147 before the call.  */
2148 static void
2149 add_allocno_to_bucket (ira_allocno_t a, ira_allocno_t *bucket_ptr)
2151 ira_allocno_t first_a;
2152 allocno_color_data_t data;
2154 if (bucket_ptr == &uncolorable_allocno_bucket
2155 && ALLOCNO_CLASS (a) != NO_REGS)
2157 uncolorable_allocnos_num++;
2158 ira_assert (uncolorable_allocnos_num > 0);
/* Push A onto the front of the doubly-linked bucket list.  */
2160 first_a = *bucket_ptr;
2161 data = ALLOCNO_COLOR_DATA (a);
2162 data->next_bucket_allocno = first_a;
2163 data->prev_bucket_allocno = NULL;
2164 if (first_a != NULL)
2165 ALLOCNO_COLOR_DATA (first_a)->prev_bucket_allocno = a;
2166 *bucket_ptr = a;
2169 /* Compare two allocnos to define which allocno should be pushed first
2170 into the coloring stack.  If the return is a negative number, the
2171 allocno given by the first parameter will be pushed first.  In this
2172 case such allocno has less priority than the second one and the
2173 hard register will be assigned to it after assignment to the second
2174 one.  As the result of such assignment order, the second allocno
2175 has a better chance to get the best hard register.  */
2176 static int
2177 bucket_allocno_compare_func (const void *v1p, const void *v2p)
2179 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2180 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2181 int diff, freq1, freq2, a1_num, a2_num;
2182 ira_allocno_t t1 = ALLOCNO_COLOR_DATA (a1)->first_thread_allocno;
2183 ira_allocno_t t2 = ALLOCNO_COLOR_DATA (a2)->first_thread_allocno;
2184 int cl1 = ALLOCNO_CLASS (a1), cl2 = ALLOCNO_CLASS (a2);
/* First order by thread frequency (colder threads pushed earlier),
   then by thread id to keep members of one thread together.  */
2186 freq1 = ALLOCNO_COLOR_DATA (t1)->thread_freq;
2187 freq2 = ALLOCNO_COLOR_DATA (t2)->thread_freq;
2188 if ((diff = freq1 - freq2) != 0)
2189 return diff;
2191 if ((diff = ALLOCNO_NUM (t2) - ALLOCNO_NUM (t1)) != 0)
2192 return diff;
2194 /* Push pseudos requiring less hard registers first.  It means that
2195 we will assign pseudos requiring more hard registers first
2196 avoiding creation small holes in free hard register file into
2197 which the pseudos requiring more hard registers can not fit.  */
2198 if ((diff = (ira_reg_class_max_nregs[cl1][ALLOCNO_MODE (a1)]
2199 - ira_reg_class_max_nregs[cl2][ALLOCNO_MODE (a2)])) != 0)
2200 return diff;
2202 freq1 = ALLOCNO_FREQ (a1);
2203 freq2 = ALLOCNO_FREQ (a2);
2204 if ((diff = freq1 - freq2) != 0)
2205 return diff;
/* Prefer pushing allocnos with more available registers first; they
   are easier to color later.  Final tie-break on allocno number makes
   the sort deterministic.  */
2207 a1_num = ALLOCNO_COLOR_DATA (a1)->available_regs_num;
2208 a2_num = ALLOCNO_COLOR_DATA (a2)->available_regs_num;
2209 if ((diff = a2_num - a1_num) != 0)
2210 return diff;
2211 return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2214 /* Sort bucket *BUCKET_PTR and return the result through
2215 BUCKET_PTR.  */
2216 static void
2217 sort_bucket (ira_allocno_t *bucket_ptr,
2218 int (*compare_func) (const void *, const void *))
2220 ira_allocno_t a, head;
2221 int n;
/* Copy the linked bucket into the SORTED_ALLOCNOS array, qsort it,
   then rebuild the doubly-linked list in sorted order.  */
2223 for (n = 0, a = *bucket_ptr;
2224 a != NULL;
2225 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2226 sorted_allocnos[n++] = a;
2227 if (n <= 1)
2228 return;
2229 qsort (sorted_allocnos, n, sizeof (ira_allocno_t), compare_func);
2230 head = NULL;
/* Rebuild back-to-front so HEAD ends as the smallest element.  */
2231 for (n--; n >= 0; n--)
2233 a = sorted_allocnos[n];
2234 ALLOCNO_COLOR_DATA (a)->next_bucket_allocno = head;
2235 ALLOCNO_COLOR_DATA (a)->prev_bucket_allocno = NULL;
2236 if (head != NULL)
2237 ALLOCNO_COLOR_DATA (head)->prev_bucket_allocno = a;
2238 head = a;
2240 *bucket_ptr = head;
2243 /* Add ALLOCNO to colorable bucket maintaining the order according
2244 their priority.  ALLOCNO should be not in a bucket before the
2245 call.  */
2246 static void
2247 add_allocno_to_ordered_colorable_bucket (ira_allocno_t allocno)
2249 ira_allocno_t before, after;
2251 form_threads_from_colorable_allocno (allocno);
/* Linear scan for the insertion point per
   bucket_allocno_compare_func's ordering.  */
2252 for (before = colorable_allocno_bucket, after = NULL;
2253 before != NULL;
2254 after = before,
2255 before = ALLOCNO_COLOR_DATA (before)->next_bucket_allocno)
2256 if (bucket_allocno_compare_func (&allocno, &before) < 0)
2257 break;
2258 ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno = before;
2259 ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno = after;
2260 if (after == NULL)
2261 colorable_allocno_bucket = allocno;
2262 else
2263 ALLOCNO_COLOR_DATA (after)->next_bucket_allocno = allocno;
2264 if (before != NULL)
2265 ALLOCNO_COLOR_DATA (before)->prev_bucket_allocno = allocno;
2268 /* Delete ALLOCNO from bucket *BUCKET_PTR.  It should be there before
2269 the call.  */
2270 static void
2271 delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
2273 ira_allocno_t prev_allocno, next_allocno;
/* Mirror of add_allocno_to_bucket: keep the uncolorable counter in
   sync for allocnos that actually need a register.  */
2275 if (bucket_ptr == &uncolorable_allocno_bucket
2276 && ALLOCNO_CLASS (allocno) != NO_REGS)
2278 uncolorable_allocnos_num--;
2279 ira_assert (uncolorable_allocnos_num >= 0);
2281 prev_allocno = ALLOCNO_COLOR_DATA (allocno)->prev_bucket_allocno;
2282 next_allocno = ALLOCNO_COLOR_DATA (allocno)->next_bucket_allocno;
2283 if (prev_allocno != NULL)
2284 ALLOCNO_COLOR_DATA (prev_allocno)->next_bucket_allocno = next_allocno;
2285 else
2287 ira_assert (*bucket_ptr == allocno);
2288 *bucket_ptr = next_allocno;
2290 if (next_allocno != NULL)
2291 ALLOCNO_COLOR_DATA (next_allocno)->prev_bucket_allocno = prev_allocno;
2294 /* Put allocno A onto the coloring stack without removing it from its
2295 bucket.  Pushing allocno to the coloring stack can result in moving
2296 conflicting allocnos from the uncolorable bucket to the colorable
2297 one.  */
2298 static void
2299 push_allocno_to_stack (ira_allocno_t a)
2301 enum reg_class aclass;
2302 allocno_color_data_t data, conflict_data;
2303 int size, i, n = ALLOCNO_NUM_OBJECTS (a);
2305 data = ALLOCNO_COLOR_DATA (a);
2306 data->in_graph_p = false;
2307 allocno_stack_vec.safe_push (a);
2308 aclass = ALLOCNO_CLASS (a);
2309 if (aclass == NO_REGS)
2310 return;
2311 size = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2312 if (n > 1)
2314 /* We will deal with the subwords individually.  */
2315 gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
2316 size = 1;
/* Removing A from the graph lowers the conflict pressure on its
   neighbors; any neighbor that becomes colorable migrates from the
   uncolorable bucket to the ordered colorable bucket.  */
2318 for (i = 0; i < n; i++)
2320 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2321 ira_object_t conflict_obj;
2322 ira_object_conflict_iterator oci;
2324 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2326 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2328 conflict_data = ALLOCNO_COLOR_DATA (conflict_a);
2329 if (conflict_data->colorable_p
2330 || ! conflict_data->in_graph_p
2331 || ALLOCNO_ASSIGNED_P (conflict_a)
2332 || !(hard_reg_set_intersect_p
2333 (ALLOCNO_COLOR_DATA (a)->profitable_hard_regs,
2334 conflict_data->profitable_hard_regs)))
2335 continue;
2336 ira_assert (bitmap_bit_p (coloring_allocno_bitmap,
2337 ALLOCNO_NUM (conflict_a)));
2338 if (update_left_conflict_sizes_p (conflict_a, a, size))
2340 delete_allocno_from_bucket
2341 (conflict_a, &uncolorable_allocno_bucket);
2342 add_allocno_to_ordered_colorable_bucket (conflict_a);
2343 if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2345 fprintf (ira_dump_file, "        Making");
2346 ira_print_expanded_allocno (conflict_a);
2347 fprintf (ira_dump_file, " colorable\n");
2355 /* Put ALLOCNO onto the coloring stack and remove it from its bucket.
2356 The allocno is in the colorable bucket if COLORABLE_P is TRUE.  */
2357 static void
2358 remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
2360 if (colorable_p)
2361 delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
2362 else
2363 delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
2364 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2366 fprintf (ira_dump_file, "      Pushing");
2367 ira_print_expanded_allocno (allocno);
2368 if (colorable_p)
2369 fprintf (ira_dump_file, "(cost %d)\n",
2370 ALLOCNO_COLOR_DATA (allocno)->temp);
2371 else
2372 fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
2373 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
2374 allocno_spill_priority (allocno),
2375 ALLOCNO_COLOR_DATA (allocno)->temp);
/* Allocnos pushed from the uncolorable bucket may end up spilled.  */
2377 if (! colorable_p)
2378 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p = true;
2379 push_allocno_to_stack (allocno);
2382 /* Put all allocnos from colorable bucket onto the coloring stack.  */
2383 static void
2384 push_only_colorable (void)
2386 form_threads_from_bucket (colorable_allocno_bucket);
2387 sort_bucket (&colorable_allocno_bucket, bucket_allocno_compare_func);
/* Draining from the head preserves the sorted push order.  */
2388 for (;colorable_allocno_bucket != NULL;)
2389 remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
2392 /* Return the frequency of exit edges (if EXIT_P) or entry from/to the
2393 loop given by its LOOP_NODE.  If REGNO >= 0, count only edges on
2394 which pseudo REGNO is live across the edge.  */
2395 ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
2397 int freq, i;
2398 edge_iterator ei;
2399 edge e;
2400 vec<edge> edges;
2402 ira_assert (current_loops != NULL && loop_node->loop != NULL
2403 && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
2404 freq = 0;
2405 if (! exit_p)
/* Entry edges: predecessors of the loop header except the latch.  */
2407 FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
2408 if (e->src != loop_node->loop->latch
2409 && (regno < 0
2410 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2411 && bitmap_bit_p (df_get_live_in (e->dest), regno))))
2412 freq += EDGE_FREQUENCY (e);
2414 else
2416 edges = get_loop_exit_edges (loop_node->loop);
2417 FOR_EACH_VEC_ELT (edges, i, e)
2418 if (regno < 0
2419 || (bitmap_bit_p (df_get_live_out (e->src), regno)
2420 && bitmap_bit_p (df_get_live_in (e->dest), regno)))
2421 freq += EDGE_FREQUENCY (e);
2422 edges.release ();
2425 return REG_FREQ_FROM_EDGE_FREQ (freq);
2428 /* Calculate and return the cost of putting allocno A into memory.  */
2429 static int
2430 calculate_allocno_spill_cost (ira_allocno_t a)
2432 int regno, cost;
2433 enum machine_mode mode;
2434 enum reg_class rclass;
2435 ira_allocno_t parent_allocno;
2436 ira_loop_tree_node_t parent_node, loop_node;
2438 regno = ALLOCNO_REGNO (a);
/* Base cost: memory cost minus the cost of keeping it in a register
   of its class.  */
2439 cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_CLASS_COST (a);
2440 if (ALLOCNO_CAP (a) != NULL)
2441 return cost;
2442 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2443 if ((parent_node = loop_node->parent) == NULL)
2444 return cost;
2445 if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
2446 return cost;
2447 mode = ALLOCNO_MODE (a);
2448 rclass = ALLOCNO_CLASS (a);
/* Adjust for moves on loop boundaries: if the pseudo lives in memory
   in the parent loop, spilling here removes memory moves across the
   loop border; otherwise spilling adds loads/stores at the border
   instead of register moves.  */
2449 if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
2450 cost -= (ira_memory_move_cost[mode][rclass][0]
2451 * ira_loop_edge_freq (loop_node, regno, true)
2452 + ira_memory_move_cost[mode][rclass][1]
2453 * ira_loop_edge_freq (loop_node, regno, false));
2454 else
2456 ira_init_register_move_cost_if_necessary (mode);
2457 cost += ((ira_memory_move_cost[mode][rclass][1]
2458 * ira_loop_edge_freq (loop_node, regno, true)
2459 + ira_memory_move_cost[mode][rclass][0]
2460 * ira_loop_edge_freq (loop_node, regno, false))
2461 - (ira_register_move_cost[mode][rclass][rclass]
2462 * (ira_loop_edge_freq (loop_node, regno, false)
2463 + ira_loop_edge_freq (loop_node, regno, true))));
2465 return cost;
2468 /* Used for sorting allocnos for spilling.  Allocnos marked as bad
2469 spill candidates sort after good ones; then lower spill priority,
2470 lower precomputed cost, and finally allocno number for a stable
2471 order.  */
2469 static inline int
2470 allocno_spill_priority_compare (ira_allocno_t a1, ira_allocno_t a2)
2472 int pri1, pri2, diff;
2474 if (ALLOCNO_BAD_SPILL_P (a1) && ! ALLOCNO_BAD_SPILL_P (a2))
2475 return 1;
2476 if (ALLOCNO_BAD_SPILL_P (a2) && ! ALLOCNO_BAD_SPILL_P (a1))
2477 return -1;
2478 pri1 = allocno_spill_priority (a1);
2479 pri2 = allocno_spill_priority (a2);
2480 if ((diff = pri1 - pri2) != 0)
2481 return diff;
2482 if ((diff
2483 = ALLOCNO_COLOR_DATA (a1)->temp - ALLOCNO_COLOR_DATA (a2)->temp) != 0)
2484 return diff;
2485 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2488 /* qsort adapter for allocno_spill_priority_compare: used for sorting
2489 allocnos for spilling.  */
2489 static int
2490 allocno_spill_sort_compare (const void *v1p, const void *v2p)
2492 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2493 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2495 return allocno_spill_priority_compare (p1, p2);
2498 /* Push allocnos to the coloring stack.  The order of allocnos in the
2499 stack defines the order for the subsequent coloring.  */
2500 static void
2501 push_allocnos_to_stack (void)
2503 ira_allocno_t a;
2504 int cost;
2506 /* Calculate uncolorable allocno spill costs.  Stored in ->temp for
2507 use by allocno_spill_priority and the sort below.  */
2507 for (a = uncolorable_allocno_bucket;
2508 a != NULL;
2509 a = ALLOCNO_COLOR_DATA (a)->next_bucket_allocno)
2510 if (ALLOCNO_CLASS (a) != NO_REGS)
2512 cost = calculate_allocno_spill_cost (a);
2513 /* ??? Remove cost of copies between the coalesced
2514 allocnos.  */
2515 ALLOCNO_COLOR_DATA (a)->temp = cost;
2517 sort_bucket (&uncolorable_allocno_bucket, allocno_spill_sort_compare);
/* Classic Chaitin-Briggs loop: push everything colorable, then push
   one potential-spill allocno and repeat until both buckets drain.  */
2518 for (;;)
2520 push_only_colorable ();
2521 a = uncolorable_allocno_bucket;
2522 if (a == NULL)
2523 break;
2524 remove_allocno_from_bucket_and_push (a, false);
2526 ira_assert (colorable_allocno_bucket == NULL
2527 && uncolorable_allocno_bucket == NULL);
2528 ira_assert (uncolorable_allocnos_num == 0);
2531 /* Pop the coloring stack and assign hard registers to the popped
2532 allocnos.  */
2533 static void
2534 pop_allocnos_from_stack (void)
2536 ira_allocno_t allocno;
2537 enum reg_class aclass;
2539 for (;allocno_stack_vec.length () != 0;)
2541 allocno = allocno_stack_vec.pop ();
2542 aclass = ALLOCNO_CLASS (allocno);
2543 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2545 fprintf (ira_dump_file, "      Popping");
2546 ira_print_expanded_allocno (allocno);
2547 fprintf (ira_dump_file, "  -- ");
/* NO_REGS allocnos can only live in memory.  */
2549 if (aclass == NO_REGS)
2551 ALLOCNO_HARD_REGNO (allocno) = -1;
2552 ALLOCNO_ASSIGNED_P (allocno) = true;
2553 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
2554 ira_assert
2555 (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
2556 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2557 fprintf (ira_dump_file, "assign memory\n");
2559 else if (assign_hard_reg (allocno, false))
2561 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2562 fprintf (ira_dump_file, "assign reg %d\n",
2563 ALLOCNO_HARD_REGNO (allocno));
2565 else if (ALLOCNO_ASSIGNED_P (allocno))
2567 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2568 fprintf (ira_dump_file, "spill%s\n",
2569 ALLOCNO_COLOR_DATA (allocno)->may_be_spilled_p
2570 ? "" : "!");
/* The allocno is back in the conflict graph whatever the outcome.  */
2572 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2576 /* Set up number of available hard registers for allocno A.  */
2577 static void
2578 setup_allocno_available_regs_num (ira_allocno_t a)
2580 int i, n, hard_regno, hard_regs_num, nwords;
2581 enum reg_class aclass;
2582 allocno_color_data_t data;
2584 aclass = ALLOCNO_CLASS (a);
2585 data = ALLOCNO_COLOR_DATA (a);
2586 data->available_regs_num = 0;
2587 if (aclass == NO_REGS)
2588 return;
2589 hard_regs_num = ira_class_hard_regs_num[aclass];
2590 nwords = ALLOCNO_NUM_OBJECTS (a);
2591 for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
2593 hard_regno = ira_class_hard_regs[aclass][i];
2594 /* Checking only profitable hard regs.  */
2595 if (TEST_HARD_REG_BIT (data->profitable_hard_regs, hard_regno))
2596 n++;
2598 data->available_regs_num = n;
/* Everything below is dump-file diagnostics only.  */
2599 if (internal_flag_ira_verbose <= 2 || ira_dump_file == NULL)
2600 return;
2601 fprintf
2602 (ira_dump_file,
2603 "      Allocno a%dr%d of %s(%d) has %d avail. regs ",
2604 ALLOCNO_NUM (a), ALLOCNO_REGNO (a),
2605 reg_class_names[aclass], ira_class_hard_regs_num[aclass], n);
2606 print_hard_reg_set (ira_dump_file, data->profitable_hard_regs, false);
2607 fprintf (ira_dump_file, ", %snode: ",
2608 hard_reg_set_equal_p (data->profitable_hard_regs,
2609 data->hard_regs_node->hard_regs->set)
2610 ? "" : "^");
2611 print_hard_reg_set (ira_dump_file,
2612 data->hard_regs_node->hard_regs->set, false);
2613 for (i = 0; i < nwords; i++)
2615 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2617 if (nwords != 1)
2619 if (i != 0)
2620 fprintf (ira_dump_file, ", ");
2621 fprintf (ira_dump_file, " obj %d", i);
2623 fprintf (ira_dump_file, " (confl regs = ");
2624 print_hard_reg_set (ira_dump_file, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2625 false);
2626 fprintf (ira_dump_file, ")");
2628 fprintf (ira_dump_file, "\n");
2631 /* Put ALLOCNO in a bucket corresponding to its number and size of its
2632 conflicting allocnos and hard registers.  */
2633 static void
2634 put_allocno_into_bucket (ira_allocno_t allocno)
2636 ALLOCNO_COLOR_DATA (allocno)->in_graph_p = true;
2637 setup_allocno_available_regs_num (allocno);
/* setup_left_conflict_sizes_p decides whether the allocno is
   trivially colorable given its remaining conflicts.  */
2638 if (setup_left_conflict_sizes_p (allocno))
2639 add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
2640 else
2641 add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
2644 /* Map: allocno number -> allocno priority.  Filled by
2645 setup_allocno_priorities.  */
2645 static int *allocno_priorities;
2647 /* Set up priorities for N allocnos in array
2648 CONSIDERATION_ALLOCNOS.  */
2649 static void
2650 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
2652 int i, length, nrefs, priority, max_priority, mult;
2653 ira_allocno_t a;
2655 max_priority = 0;
/* First pass: raw priority = log2(refs+?) * (memory cost - class
   cost) * regs needed; remember the largest magnitude for scaling.  */
2656 for (i = 0; i < n; i++)
2658 a = consideration_allocnos[i];
2659 nrefs = ALLOCNO_NREFS (a);
2660 ira_assert (nrefs >= 0);
2661 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
2662 ira_assert (mult >= 0);
2663 allocno_priorities[ALLOCNO_NUM (a)]
2664 = priority
2665 = (mult
2666 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a))
2667 * ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)])
2668 if (priority < 0)
2669 priority = -priority;
2670 if (max_priority < priority)
2671 max_priority = priority;
/* Second pass: scale into the int range and divide by live-range
   length (pressure points per object) so long-lived allocnos get
   lower priority.  */
2673 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
2674 for (i = 0; i < n; i++)
2676 a = consideration_allocnos[i];
2677 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2678 if (ALLOCNO_NUM_OBJECTS (a) > 1)
2679 length /= ALLOCNO_NUM_OBJECTS (a);
2680 if (length <= 0)
2681 length = 1;
2682 allocno_priorities[ALLOCNO_NUM (a)]
2683 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
2687 /* Sort allocnos according to the profit of usage of a hard register
2688 instead of memory for them.  */
2689 static int
2690 allocno_cost_compare_func (const void *v1p, const void *v2p)
2692 ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
2693 ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
2694 int c1, c2;
/* Smaller (memory - class) cost sorts first, i.e. allocnos that gain
   most from a hard register are processed last-to-first by callers
   iterating in order.  */
2696 c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_CLASS_COST (p1);
2697 c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_CLASS_COST (p2);
2698 if (c1 - c2)
2699 return c1 - c2;
2701 /* If regs are equally good, sort by allocno numbers, so that the
2702 results of qsort leave nothing to chance.  */
2703 return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
2706 /* We used Chaitin-Briggs coloring to assign as many pseudos as
2707 possible to hard registers.  Let us try to improve allocation with
2708 cost point of view.  This function improves the allocation by
2709 spilling some allocnos and assigning the freed hard registers to
2710 other allocnos if it decreases the overall allocation cost.  */
2711 static void
2712 improve_allocation (void)
2714 unsigned int i;
2715 int j, k, n, hregno, conflict_hregno, base_cost, class_size, word, nwords;
2716 int check, spill_cost, min_cost, nregs, conflict_nregs, r, best;
2717 bool try_p;
2718 enum reg_class aclass;
2719 enum machine_mode mode;
2720 int *allocno_costs;
2721 int costs[FIRST_PSEUDO_REGISTER];
2722 HARD_REG_SET conflicting_regs[2], profitable_hard_regs;
2723 ira_allocno_t a;
2724 bitmap_iterator bi;
2726 /* Clear counts used to process conflicting allocnos only once for
2727 each allocno.  */
2728 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2729 ALLOCNO_COLOR_DATA (ira_allocnos[i])->temp = 0;
2730 check = n = 0;
2731 /* Process each allocno and try to assign a hard register to it by
2732 spilling some its conflicting allocnos.  */
2733 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
2735 a = ira_allocnos[i];
2736 ALLOCNO_COLOR_DATA (a)->temp = 0;
2737 if (empty_profitable_hard_regs (a))
2738 continue;
2739 check++;
2740 aclass = ALLOCNO_CLASS (a);
2741 allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
2742 if (allocno_costs == NULL)
2743 allocno_costs = ALLOCNO_HARD_REG_COSTS (a);
/* BASE_COST is what the current assignment (memory or the current
   hard register) costs; candidate registers are measured against
   it.  */
2744 if ((hregno = ALLOCNO_HARD_REGNO (a)) < 0)
2745 base_cost = ALLOCNO_UPDATED_MEMORY_COST (a);
2746 else if (allocno_costs == NULL)
2747 /* It means that assigning a hard register is not profitable
2748 (we don't waste memory for hard register costs in this
2749 case).  */
2750 continue;
2751 else
2752 base_cost = allocno_costs[ira_class_hard_reg_index[aclass][hregno]];
2753 try_p = false;
2754 get_conflict_and_start_profitable_regs (a, false,
2755 conflicting_regs,
2756 &profitable_hard_regs);
2757 class_size = ira_class_hard_regs_num[aclass];
2758 /* Set up cost improvement for usage of each profitable hard
2759 register for allocno A.  */
2760 for (j = 0; j < class_size; j++)
2762 hregno = ira_class_hard_regs[aclass][j];
2763 if (! check_hard_reg_p (a, hregno,
2764 conflicting_regs, profitable_hard_regs))
2765 continue;
2766 ira_assert (ira_class_hard_reg_index[aclass][hregno] == j);
2767 k = allocno_costs == NULL ? 0 : j;
2768 costs[hregno] = (allocno_costs == NULL
2769 ? ALLOCNO_UPDATED_CLASS_COST (a) : allocno_costs[k]);
2770 costs[hregno] -= base_cost;
2771 if (costs[hregno] < 0)
2772 try_p = true;
2774 if (! try_p)
2775 /* There is no chance to improve the allocation cost by
2776 assigning hard register to allocno A even without spilling
2777 conflicting allocnos.  */
2778 continue;
2779 mode = ALLOCNO_MODE (a);
2780 nwords = ALLOCNO_NUM_OBJECTS (a);
2781 /* Process each allocno conflicting with A and update the cost
2782 improvement for profitable hard registers of A.  To use a
2783 hard register for A we need to spill some conflicting
2784 allocnos and that creates penalty for the cost
2785 improvement.  */
2786 for (word = 0; word < nwords; word++)
2788 ira_object_t conflict_obj;
2789 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2790 ira_object_conflict_iterator oci;
2792 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2794 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2796 if (ALLOCNO_COLOR_DATA (conflict_a)->temp == check)
2797 /* We already processed this conflicting allocno
2798 because we processed earlier another object of the
2799 conflicting allocno.  */
2800 continue;
2801 ALLOCNO_COLOR_DATA (conflict_a)->temp = check;
2802 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2803 continue;
/* SPILL_COST: net penalty of evicting CONFLICT_A from its hard
   register back to memory.  */
2804 spill_cost = ALLOCNO_UPDATED_MEMORY_COST (conflict_a);
2805 k = (ira_class_hard_reg_index
2806 [ALLOCNO_CLASS (conflict_a)][conflict_hregno]);
2807 ira_assert (k >= 0);
2808 if ((allocno_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (conflict_a))
2809 != NULL)
2810 spill_cost -= allocno_costs[k];
2811 else if ((allocno_costs = ALLOCNO_HARD_REG_COSTS (conflict_a))
2812 != NULL)
2813 spill_cost -= allocno_costs[k];
2814 else
2815 spill_cost -= ALLOCNO_UPDATED_CLASS_COST (conflict_a);
/* Charge the spill penalty to every candidate register of A whose
   span would overlap CONFLICT_A's register span.  */
2816 conflict_nregs
2817 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2818 for (r = conflict_hregno;
2819 r >= 0 && r + hard_regno_nregs[r][mode] > conflict_hregno;
2820 r--)
2821 if (check_hard_reg_p (a, r,
2822 conflicting_regs, profitable_hard_regs))
2823 costs[r] += spill_cost;
2824 for (r = conflict_hregno + 1;
2825 r < conflict_hregno + conflict_nregs;
2826 r++)
2827 if (check_hard_reg_p (a, r,
2828 conflicting_regs, profitable_hard_regs))
2829 costs[r] += spill_cost;
2832 min_cost = INT_MAX;
2833 best = -1;
2834 /* Now we choose hard register for A which results in highest
2835 allocation cost improvement.  */
2836 for (j = 0; j < class_size; j++)
2838 hregno = ira_class_hard_regs[aclass][j];
2839 if (check_hard_reg_p (a, hregno,
2840 conflicting_regs, profitable_hard_regs)
2841 && min_cost > costs[hregno])
2843 best = hregno;
2844 min_cost = costs[hregno];
2847 if (min_cost >= 0)
2848 /* We are in a situation when assigning any hard register to A
2849 by spilling some conflicting allocnos does not improve the
2850 allocation cost.  */
2851 continue;
2852 nregs = hard_regno_nregs[best][mode];
2853 /* Now spill conflicting allocnos which contain a hard register
2854 of A when we assign the best chosen hard register to it.  */
2855 for (word = 0; word < nwords; word++)
2857 ira_object_t conflict_obj;
2858 ira_object_t obj = ALLOCNO_OBJECT (a, word);
2859 ira_object_conflict_iterator oci;
2861 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2863 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2865 if ((conflict_hregno = ALLOCNO_HARD_REGNO (conflict_a)) < 0)
2866 continue;
2867 conflict_nregs
2868 = hard_regno_nregs[conflict_hregno][ALLOCNO_MODE (conflict_a)];
2869 if (best + nregs <= conflict_hregno
2870 || conflict_hregno + conflict_nregs <= best)
2871 /* No intersection.  */
2872 continue;
2873 ALLOCNO_HARD_REGNO (conflict_a) = -1;
2874 sorted_allocnos[n++] = conflict_a;
2875 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2876 fprintf (ira_dump_file, "Spilling a%dr%d for a%dr%d\n",
2877 ALLOCNO_NUM (conflict_a), ALLOCNO_REGNO (conflict_a),
2878 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2881 /* Assign the best chosen hard register to A.  */
2882 ALLOCNO_HARD_REGNO (a) = best;
2883 if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2884 fprintf (ira_dump_file, "Assigning %d to a%dr%d\n",
2885 best, ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2887 if (n == 0)
2888 return;
2889 /* We spilled some allocnos to assign their hard registers to other
2890 allocnos.  The spilled allocnos are now in array
2891 'sorted_allocnos'.  There is still a possibility that some of the
2892 spilled allocnos can get hard registers.  So let us try assign
2893 them hard registers again (just a reminder -- function
2894 'assign_hard_reg' assigns hard registers only if it is possible
2895 and profitable).  We process the spilled allocnos with biggest
2896 benefit to get hard register first -- see function
2897 'allocno_cost_compare_func'.  */
2898 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
2899 allocno_cost_compare_func);
2900 for (j = 0; j < n; j++)
2902 a = sorted_allocnos[j];
2903 ALLOCNO_ASSIGNED_P (a) = false;
2904 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2906 fprintf (ira_dump_file, "      ");
2907 ira_print_expanded_allocno (a);
2908 fprintf (ira_dump_file, "  -- ");
2910 if (assign_hard_reg (a, false))
2912 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2913 fprintf (ira_dump_file, "assign hard reg %d\n",
2914 ALLOCNO_HARD_REGNO (a));
2916 else
2918 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2919 fprintf (ira_dump_file, "assign memory\n");
2924 /* Sort allocnos according to their priorities (higher priority
2925 first); ties broken by allocno number for determinism.  */
2925 static int
2926 allocno_priority_compare_func (const void *v1p, const void *v2p)
2928 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2929 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2930 int pri1, pri2;
2932 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
2933 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
2934 if (pri2 != pri1)
2935 return SORTGT (pri2, pri1);
2937 /* If regs are equally good, sort by allocnos, so that the results of
2938 qsort leave nothing to chance.  */
2939 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
   taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP.
   Depending on FLAG_IRA_ALGORITHM either a simple priority-based
   assignment or the full build/simplify/select graph coloring is
   used; in both cases IMPROVE_ALLOCATION runs afterwards.  */
static void
color_allocnos (void)
{
  unsigned int i, n;
  bitmap_iterator bi;
  ira_allocno_t a;

  setup_profitable_hard_regs ();
  /* First pass: compute each allocno's conflict hard registers and
     drop preferences for hard registers that are not profitable.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
    {
      int l, nr;
      HARD_REG_SET conflict_hard_regs;
      allocno_color_data_t data;
      ira_pref_t pref, next_pref;

      a = ira_allocnos[i];
      nr = ALLOCNO_NUM_OBJECTS (a);
      CLEAR_HARD_REG_SET (conflict_hard_regs);
      /* Union the conflict sets of all the allocno's objects.  */
      for (l = 0; l < nr; l++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, l);
          IOR_HARD_REG_SET (conflict_hard_regs,
                            OBJECT_CONFLICT_HARD_REGS (obj));
        }
      data = ALLOCNO_COLOR_DATA (a);
      /* NEXT_PREF is cached because ira_remove_pref frees PREF.  */
      for (pref = ALLOCNO_PREFS (a); pref != NULL; pref = next_pref)
        {
          next_pref = pref->next_pref;
          if (! ira_hard_reg_in_set_p (pref->hard_regno,
                                       ALLOCNO_MODE (a),
                                       data->profitable_hard_regs))
            ira_remove_pref (pref);
        }
    }
  if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
    {
      /* Priority algorithm: spill NO_REGS allocnos, sort the rest by
         priority and assign greedily.  */
      n = 0;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
        {
          a = ira_allocnos[i];
          if (ALLOCNO_CLASS (a) == NO_REGS)
            {
              ALLOCNO_HARD_REGNO (a) = -1;
              ALLOCNO_ASSIGNED_P (a) = true;
              ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
              ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf (ira_dump_file, " ");
                  fprintf (ira_dump_file, " Spill");
                  ira_print_expanded_allocno (a);
                  fprintf (ira_dump_file, "\n");
                }
              continue;
            }
          sorted_allocnos[n++] = a;
        }
      if (n != 0)
        {
          setup_allocno_priorities (sorted_allocnos, n);
          qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
                 allocno_priority_compare_func);
          for (i = 0; i < n; i++)
            {
              a = sorted_allocnos[i];
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf (ira_dump_file, " ");
                  ira_print_expanded_allocno (a);
                  fprintf (ira_dump_file, " -- ");
                }
              if (assign_hard_reg (a, false))
                {
                  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                    fprintf (ira_dump_file, "assign hard reg %d\n",
                             ALLOCNO_HARD_REGNO (a));
                }
              else
                {
                  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                    fprintf (ira_dump_file, "assign memory\n");
                }
            }
        }
    }
  else
    {
      /* Chaitin-Briggs algorithm: build the conflict graph forest,
         push allocnos onto the coloring stack, then pop assigning
         hard registers.  */
      form_allocno_hard_regs_nodes_forest ();
      if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
        print_hard_regs_forest (ira_dump_file);
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
        {
          a = ira_allocnos[i];
          if (ALLOCNO_CLASS (a) != NO_REGS && ! empty_profitable_hard_regs (a))
            {
              ALLOCNO_COLOR_DATA (a)->in_graph_p = true;
              update_costs_from_prefs (a);
            }
          else
            {
              /* No profitable hard register can ever be assigned:
                 spill immediately and keep the allocno out of the
                 coloring graph.  */
              ALLOCNO_HARD_REGNO (a) = -1;
              ALLOCNO_ASSIGNED_P (a) = true;
              /* We don't need updated costs anymore.  */
              ira_free_allocno_updated_costs (a);
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf (ira_dump_file, " ");
                  fprintf (ira_dump_file, " Spill");
                  ira_print_expanded_allocno (a);
                  fprintf (ira_dump_file, "\n");
                }
            }
        }
      /* Put the allocnos into the corresponding buckets.  */
      colorable_allocno_bucket = NULL;
      uncolorable_allocno_bucket = NULL;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
        {
          a = ira_allocnos[i];
          if (ALLOCNO_COLOR_DATA (a)->in_graph_p)
            put_allocno_into_bucket (a);
        }
      push_allocnos_to_stack ();
      pop_allocnos_from_stack ();
      finish_allocno_hard_regs_nodes_forest ();
    }
  improve_allocation ();
}
/* Output information about the loop given by its LOOP_TREE_NODE to
   IRA_DUMP_FILE: the loop header, member basic blocks with exits to
   other loops, the allocno/regno sets attached to the node, and the
   register pressure per pressure class.  */
static void
print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
  unsigned int j;
  bitmap_iterator bi;
  ira_loop_tree_node_t subloop_node, dest_loop_node;
  edge e;
  edge_iterator ei;

  /* A node with no parent is the artificial root "loop 0" covering
     the whole function.  */
  if (loop_tree_node->parent == NULL)
    fprintf (ira_dump_file,
             "\n Loop 0 (parent -1, header bb%d, depth 0)\n bbs:",
             NUM_FIXED_BLOCKS);
  else
    {
      ira_assert (current_loops != NULL && loop_tree_node->loop != NULL);
      fprintf (ira_dump_file,
               "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
               loop_tree_node->loop_num, loop_tree_node->parent->loop_num,
               loop_tree_node->loop->header->index,
               loop_depth (loop_tree_node->loop));
    }
  /* Print the member basic blocks; for each, also note successor
     edges leaving this loop (as "(->bb:loop)").  */
  for (subloop_node = loop_tree_node->children;
       subloop_node != NULL;
       subloop_node = subloop_node->next)
    if (subloop_node->bb != NULL)
      {
        fprintf (ira_dump_file, " %d", subloop_node->bb->index);
        FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
          if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
              && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
                  != loop_tree_node))
            fprintf (ira_dump_file, "(->%d:l%d)",
                     e->dest->index, dest_loop_node->loop_num);
      }
  fprintf (ira_dump_file, "\n all:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n modified regnos:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
    fprintf (ira_dump_file, " %d", j);
  fprintf (ira_dump_file, "\n border:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n Pressure:");
  /* Only classes with non-zero pressure are printed.  */
  for (j = 0; (int) j < ira_pressure_classes_num; j++)
    {
      enum reg_class pclass;

      pclass = ira_pressure_classes[j];
      if (loop_tree_node->reg_pressure[pclass] == 0)
        continue;
      fprintf (ira_dump_file, " %s=%d", reg_class_names[pclass],
               loop_tree_node->reg_pressure[pclass]);
    }
  fprintf (ira_dump_file, "\n");
}
/* Color the allocnos inside loop (in the extreme case it can be all
   of the function) given the corresponding LOOP_TREE_NODE.  The
   function is called for each loop during top-down traverse of the
   loop tree.  After coloring this region it propagates the results
   to cap allocnos and updates the costs of the corresponding
   allocnos in the subloops.  */
static void
color_pass (ira_loop_tree_node_t loop_tree_node)
{
  int regno, hard_regno, index = -1, n;
  int cost, exit_freq, enter_freq;
  unsigned int j;
  bitmap_iterator bi;
  enum machine_mode mode;
  enum reg_class rclass, aclass, pclass;
  ira_allocno_t a, subloop_allocno;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_tree_node->bb == NULL);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    print_loop_title (loop_tree_node);

  bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
  bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
  /* Count the allocnos of the region and exclude already-assigned
     ones from the set to be colored (they stay in the consideration
     set).  */
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      n++;
      if (! ALLOCNO_ASSIGNED_P (a))
        continue;
      bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
    }
  /* Attach zero-initialized per-allocno coloring data for the
     duration of this pass; it is detached and freed at the end.  */
  allocno_color_data
    = (allocno_color_data_t) ira_allocate (sizeof (struct allocno_color_data)
                                           * n);
  memset (allocno_color_data, 0, sizeof (struct allocno_color_data) * n);
  curr_allocno_process = 0;
  n = 0;
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = allocno_color_data + n;
      n++;
    }
  init_allocno_threads ();
  /* Color all mentioned allocnos including transparent ones.  */
  color_allocnos ();
  /* Process caps.  They are processed just once.  */
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
      {
        a = ira_allocnos[j];
        if (ALLOCNO_CAP_MEMBER (a) == NULL)
          continue;
        /* Remove from processing in the next loop.  */
        bitmap_clear_bit (consideration_allocno_bitmap, j);
        rclass = ALLOCNO_CLASS (a);
        pclass = ira_pressure_class_translate[rclass];
        /* Propagate the cap's assignment down to the represented
           subloop allocno when register pressure in the region is
           low enough (IRA_REGION_MIXED only).  */
        if (flag_ira_region == IRA_REGION_MIXED
            && (loop_tree_node->reg_pressure[pclass]
                <= ira_class_hard_regs_num[pclass]))
          {
            mode = ALLOCNO_MODE (a);
            hard_regno = ALLOCNO_HARD_REGNO (a);
            if (hard_regno >= 0)
              {
                index = ira_class_hard_reg_index[rclass][hard_regno];
                ira_assert (index >= 0);
              }
            regno = ALLOCNO_REGNO (a);
            subloop_allocno = ALLOCNO_CAP_MEMBER (a);
            subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
            ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
            ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
            ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
            if (hard_regno >= 0)
              update_costs_from_copies (subloop_allocno, true, true);
            /* We don't need updated costs anymore: */
            ira_free_allocno_updated_costs (subloop_allocno);
          }
      }
  /* Update costs of the corresponding allocnos (not caps) in the
     subloops.  */
  for (subloop_node = loop_tree_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
        {
          a = ira_allocnos[j];
          ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
          mode = ALLOCNO_MODE (a);
          rclass = ALLOCNO_CLASS (a);
          pclass = ira_pressure_class_translate[rclass];
          hard_regno = ALLOCNO_HARD_REGNO (a);
          /* Use hard register class here.  ??? */
          if (hard_regno >= 0)
            {
              index = ira_class_hard_reg_index[rclass][hard_regno];
              ira_assert (index >= 0);
            }
          regno = ALLOCNO_REGNO (a);
          /* ??? conflict costs */
          subloop_allocno = subloop_node->regno_allocno_map[regno];
          if (subloop_allocno == NULL
              || ALLOCNO_CAP (subloop_allocno) != NULL)
            continue;
          ira_assert (ALLOCNO_CLASS (subloop_allocno) == rclass);
          ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
                                    ALLOCNO_NUM (subloop_allocno)));
          /* Low pressure in the subloop: just inherit this region's
             assignment.  */
          if ((flag_ira_region == IRA_REGION_MIXED)
              && (loop_tree_node->reg_pressure[pclass]
                  <= ira_class_hard_regs_num[pclass]))
            {
              if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
                {
                  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
                  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
                  if (hard_regno >= 0)
                    update_costs_from_copies (subloop_allocno, true, true);
                  /* We don't need updated costs anymore: */
                  ira_free_allocno_updated_costs (subloop_allocno);
                }
              continue;
            }
          exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
          enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
          ira_assert (regno < ira_reg_equiv_len);
          /* A pseudo with an equivalence must keep the same
             assignment through all regions: inherit it.  */
          if (ira_equiv_no_lvalue_p (regno))
            {
              if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
                {
                  ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
                  ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
                  if (hard_regno >= 0)
                    update_costs_from_copies (subloop_allocno, true, true);
                  /* We don't need updated costs anymore: */
                  ira_free_allocno_updated_costs (subloop_allocno);
                }
            }
          else if (hard_regno < 0)
            {
              /* Spilled here: account for memory moves on the loop
                 border when the subloop allocno also goes to memory.  */
              ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
                -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
                    + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
            }
          else
            {
              /* Assigned a hard register here: bias the subloop
                 allocno's costs toward the same register by the cost
                 of the border register moves it would save.  */
              aclass = ALLOCNO_CLASS (subloop_allocno);
              ira_init_register_move_cost_if_necessary (mode);
              cost = (ira_register_move_cost[mode][rclass][rclass]
                      * (exit_freq + enter_freq));
              ira_allocate_and_set_or_copy_costs
                (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), aclass,
                 ALLOCNO_UPDATED_CLASS_COST (subloop_allocno),
                 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
              ira_allocate_and_set_or_copy_costs
                (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
                 aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
              ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
              ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
                -= cost;
              if (ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
                  > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
                ALLOCNO_UPDATED_CLASS_COST (subloop_allocno)
                  = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
              ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
                += (ira_memory_move_cost[mode][rclass][0] * enter_freq
                    + ira_memory_move_cost[mode][rclass][1] * exit_freq);
            }
        }
    }
  ira_free (allocno_color_data);
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      ALLOCNO_ADD_DATA (a) = NULL;
    }
}
3313 /* Initialize the common data for coloring and calls functions to do
3314 Chaitin-Briggs and regional coloring. */
3315 static void
3316 do_coloring (void)
3318 coloring_allocno_bitmap = ira_allocate_bitmap ();
3319 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3320 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
3322 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
3324 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3325 ira_print_disposition (ira_dump_file);
3327 ira_free_bitmap (coloring_allocno_bitmap);
/* Move spill/restore code, which are to be generated in ira-emit.c,
   to less frequent points (if it is profitable) by reassigning some
   allocnos (in loop with subloops containing in another loop) to
   memory which results in longer live-range where the corresponding
   pseudo-registers will be in memory.  Iterates to a fixed point
   because spilling one allocno can make spilling another one
   profitable.  */
static void
move_spill_restore (void)
{
  int cost, regno, hard_regno, hard_regno2, index;
  bool changed_p;
  int enter_freq, exit_freq;
  enum machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t a, parent_allocno, subloop_allocno;
  ira_loop_tree_node_t parent, loop_node, subloop_node;
  ira_allocno_iterator ai;

  for (;;)
    {
      changed_p = false;
      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
        fprintf (ira_dump_file, "New iteration of spill/restore move\n");
      FOR_EACH_ALLOCNO (a, ai)
        {
          regno = ALLOCNO_REGNO (a);
          loop_node = ALLOCNO_LOOP_TREE_NODE (a);
          /* Only consider non-cap allocnos with a hard register,
             belonging to a loop with children, and living on the
             loop border.  */
          if (ALLOCNO_CAP_MEMBER (a) != NULL
              || ALLOCNO_CAP (a) != NULL
              || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
              || loop_node->children == NULL
              /* don't do the optimization because it can create
                 copies and the reload pass can spill the allocno set
                 by copy although the allocno will not get memory
                 slot.  */
              || ira_equiv_no_lvalue_p (regno)
              || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
            continue;
          mode = ALLOCNO_MODE (a);
          rclass = ALLOCNO_CLASS (a);
          index = ira_class_hard_reg_index[rclass][hard_regno];
          ira_assert (index >= 0);
          /* COST accumulates the change of total cost from keeping
             the allocno in its hard register vs. memory; a negative
             final value means spilling here is profitable.  */
          cost = (ALLOCNO_MEMORY_COST (a)
                  - (ALLOCNO_HARD_REG_COSTS (a) == NULL
                     ? ALLOCNO_CLASS_COST (a)
                     : ALLOCNO_HARD_REG_COSTS (a)[index]));
          ira_init_register_move_cost_if_necessary (mode);
          for (subloop_node = loop_node->subloops;
               subloop_node != NULL;
               subloop_node = subloop_node->subloop_next)
            {
              ira_assert (subloop_node->bb == NULL);
              subloop_allocno = subloop_node->regno_allocno_map[regno];
              if (subloop_allocno == NULL)
                continue;
              ira_assert (rclass == ALLOCNO_CLASS (subloop_allocno));
              /* We have accumulated cost.  To get the real cost of
                 allocno usage in the loop we should subtract costs of
                 the subloop allocnos.  */
              cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
                       - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
                          ? ALLOCNO_CLASS_COST (subloop_allocno)
                          : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
              exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
              enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
              /* Account for the moves on the subloop borders: memory
                 moves if the subloop allocno is spilled, register
                 moves if it got a different hard register.  */
              if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
                cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
                         + ira_memory_move_cost[mode][rclass][1] * enter_freq);
              else
                {
                  cost
                    += (ira_memory_move_cost[mode][rclass][0] * exit_freq
                        + ira_memory_move_cost[mode][rclass][1] * enter_freq);
                  if (hard_regno2 != hard_regno)
                    cost -= (ira_register_move_cost[mode][rclass][rclass]
                             * (exit_freq + enter_freq));
                }
            }
          /* Likewise for the border with the parent region.  */
          if ((parent = loop_node->parent) != NULL
              && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
            {
              ira_assert (rclass == ALLOCNO_CLASS (parent_allocno));
              exit_freq = ira_loop_edge_freq (loop_node, regno, true);
              enter_freq = ira_loop_edge_freq (loop_node, regno, false);
              if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
                cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
                         + ira_memory_move_cost[mode][rclass][1] * enter_freq);
              else
                {
                  cost
                    += (ira_memory_move_cost[mode][rclass][1] * exit_freq
                        + ira_memory_move_cost[mode][rclass][0] * enter_freq);
                  if (hard_regno2 != hard_regno)
                    cost -= (ira_register_move_cost[mode][rclass][rclass]
                             * (exit_freq + enter_freq));
                }
            }
          if (cost < 0)
            {
              ALLOCNO_HARD_REGNO (a) = -1;
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf
                    (ira_dump_file,
                     " Moving spill/restore for a%dr%d up from loop %d",
                     ALLOCNO_NUM (a), regno, loop_node->loop_num);
                  fprintf (ira_dump_file, " - profit %d\n", -cost);
                }
              changed_p = true;
            }
        }
      if (! changed_p)
        break;
    }
}
/* Update current hard reg costs and current conflict hard reg costs
   for allocno A.  It is done by processing its copies containing
   other allocnos already assigned.  A itself must be unassigned and
   its previous updated costs are discarded first.  */
static void
update_curr_costs (ira_allocno_t a)
{
  int i, hard_regno, cost;
  enum machine_mode mode;
  enum reg_class aclass, rclass;
  ira_allocno_t another_a;
  ira_copy_t cp, next_cp;

  ira_free_allocno_updated_costs (a);
  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  aclass = ALLOCNO_CLASS (a);
  if (aclass == NO_REGS)
    return;
  mode = ALLOCNO_MODE (a);
  ira_init_register_move_cost_if_necessary (mode);
  for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
    {
      /* A copy may list A as either side; pick the other allocno and
         the matching next-copy link.  */
      if (cp->first == a)
        {
          next_cp = cp->next_first_allocno_copy;
          another_a = cp->second;
        }
      else if (cp->second == a)
        {
          next_cp = cp->next_second_allocno_copy;
          another_a = cp->first;
        }
      else
        gcc_unreachable ();
      /* Only assigned partners with a hard register from a class
         intersecting ACLASS contribute.  */
      if (! ira_reg_classes_intersect_p[aclass][ALLOCNO_CLASS (another_a)]
          || ! ALLOCNO_ASSIGNED_P (another_a)
          || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
        continue;
      rclass = REGNO_REG_CLASS (hard_regno);
      i = ira_class_hard_reg_index[aclass][hard_regno];
      if (i < 0)
        continue;
      /* The move-cost direction depends on which side of the copy A
         is on.  */
      cost = (cp->first == a
              ? ira_register_move_cost[mode][rclass][aclass]
              : ira_register_move_cost[mode][aclass][rclass]);
      ira_allocate_and_set_or_copy_costs
        (&ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a),
         ALLOCNO_HARD_REG_COSTS (a));
      ira_allocate_and_set_or_copy_costs
        (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
         aclass, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
      /* Make the partner's hard register look cheaper for A in
         proportion to the copy's execution frequency.  */
      ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
      ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
    }
}
/* Try to assign hard registers to the unassigned allocnos and
   allocnos conflicting with them or conflicting with allocnos whose
   regno >= START_REGNO.  The function is called after ira_flattening,
   so more allocnos (including ones created in ira-emit.c) will have a
   chance to get a hard register.  We use simple assignment algorithm
   based on priorities.  */
void
ira_reassign_conflict_allocnos (int start_regno)
{
  int i, allocnos_to_color_num;
  ira_allocno_t a;
  enum reg_class aclass;
  bitmap allocnos_to_color;
  ira_allocno_iterator ai;

  allocnos_to_color = ira_allocate_bitmap ();
  allocnos_to_color_num = 0;
  /* Collect candidates: every unassigned allocno, plus allocnos
     conflicting with ones whose regno >= START_REGNO.  The bitmap
     prevents putting the same allocno in SORTED_ALLOCNOS twice.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      if (! ALLOCNO_ASSIGNED_P (a)
          && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
        {
          if (ALLOCNO_CLASS (a) != NO_REGS)
            sorted_allocnos[allocnos_to_color_num++] = a;
          else
            {
              /* NO_REGS allocnos can only live in memory.  */
              ALLOCNO_ASSIGNED_P (a) = true;
              ALLOCNO_HARD_REGNO (a) = -1;
              ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
              ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
            }
          bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
        }
      if (ALLOCNO_REGNO (a) < start_regno
          || (aclass = ALLOCNO_CLASS (a)) == NO_REGS)
        continue;
      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          ira_object_t conflict_obj;
          ira_object_conflict_iterator oci;

          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

              ira_assert (ira_reg_classes_intersect_p
                          [aclass][ALLOCNO_CLASS (conflict_a)]);
              /* bitmap_set_bit returns false if the bit was already
                 set, i.e. the conflict allocno was seen before.  */
              if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
                continue;
              sorted_allocnos[allocnos_to_color_num++] = conflict_a;
            }
        }
    }
  ira_free_bitmap (allocnos_to_color);
  /* Process the candidates in priority order.  */
  if (allocnos_to_color_num > 1)
    {
      setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
      qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
             allocno_priority_compare_func);
    }
  /* Un-assign all candidates first so their copy-based costs can be
     recomputed before any of them is recolored.  */
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      ALLOCNO_ASSIGNED_P (a) = false;
      update_curr_costs (a);
    }
  for (i = 0; i < allocnos_to_color_num; i++)
    {
      a = sorted_allocnos[i];
      if (assign_hard_reg (a, true))
        {
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf
              (ira_dump_file,
               " Secondary allocation: assign hard reg %d to reg %d\n",
               ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
        }
    }
}
/* This page contains functions used to find conflicts using allocno
   live ranges.  */

#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if the live ranges of pseudo-registers REGNO1 and
   REGNO2 intersect.  This should be used when there is only one
   region.  Currently this is used during reload.  */
static bool
conflict_by_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
              && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Register info calculated by the dataflow infrastructure can
     differ from the info calculated by regclass, so either pseudo
     may lack an allocno; treat that as no conflict.  */
  a1 = ira_loop_tree_root->regno_allocno_map[regno1];
  a2 = ira_loop_tree_root->regno_allocno_map[regno2];
  if (a1 == NULL || a2 == NULL)
    return false;
  return allocnos_conflict_by_live_ranges_p (a1, a2);
}

#endif
/* This page contains code to coalesce memory stack slots used by
   spilled allocnos.  This results in smaller stack frame, better data
   locality, and in smaller code for some architectures like
   x86/x86_64 where insn size depends on address displacement value.
   On the other hand, it can worsen insn scheduling after the RA but
   in practice it is less important than smaller stack frames.  */

/* TRUE if we coalesced some allocnos.  In other words, if we got
   loops formed by members first_coalesced_allocno and
   next_coalesced_allocno containing more one allocno.  */
static bool allocno_coalesced_p;

/* Bitmap used to prevent a repeated allocno processing because of
   coalescing.  */
static bitmap processed_coalesced_allocno_bitmap;

/* See below.  */
typedef struct coalesce_data *coalesce_data_t;

/* To decrease footprint of ira_allocno structure we store all data
   needed only for coalescing in the following structure.  */
struct coalesce_data
{
  /* Coalesced allocnos form a cyclic list.  One allocno given by
     FIRST represents all coalesced allocnos.  The
     list is chained by NEXT.  */
  ira_allocno_t first;
  ira_allocno_t next;
  /* Scratch value; e.g. used to hold the coalesced stack-slot index
     read in setup_slot_coalesced_allocno_live_ranges (presumably set
     by its caller -- verify in coalesce_spill_slots).  */
  int temp;
};

/* Container for storing allocno data concerning coalescing.  */
static coalesce_data_t allocno_coalesce_data;

/* Macro to access the data concerning coalescing.  */
#define ALLOCNO_COALESCE_DATA(a) ((coalesce_data_t) ALLOCNO_ADD_DATA (a))
/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging A2 set into A1
   set).  Each set is a cyclic list chained by NEXT whose members all
   share the same FIRST representative.  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_COALESCE_DATA (a1)->first;
  a = ALLOCNO_COALESCE_DATA (a2)->first;
  /* Same representative means the sets are already merged.  */
  if (first == a)
    return;
  /* Retarget FIRST for every member of A2's ring, remembering the
     member just before A2 in LAST.  */
  for (last = a2, a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      ALLOCNO_COALESCE_DATA (a)->first = first;
      if (a == a2)
        break;
      last = a;
    }
  /* Splice A2's ring into A1's ring right after FIRST.  */
  next = allocno_coalesce_data[ALLOCNO_NUM (first)].next;
  allocno_coalesce_data[ALLOCNO_NUM (first)].next = a2;
  allocno_coalesce_data[ALLOCNO_NUM (last)].next = next;
}
/* Return TRUE if there are conflicting allocnos from two sets of
   coalesced allocnos given correspondingly by allocnos A1 and A2.  We
   use live ranges to find conflicts because conflicts are represented
   only for allocnos of the same allocno class and during the reload
   pass we coalesce allocnos for sharing stack memory slots.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, conflict_a;

  if (allocno_coalesced_p)
    {
      /* NOTE(review): the bitmap filled here is not read anywhere in
         this function; it appears to only be reset and populated.  */
      bitmap_clear (processed_coalesced_allocno_bitmap);
      for (a = ALLOCNO_COALESCE_DATA (a1)->next;;
           a = ALLOCNO_COALESCE_DATA (a)->next)
        {
          bitmap_set_bit (processed_coalesced_allocno_bitmap, ALLOCNO_NUM (a));
          if (a == a1)
            break;
        }
    }
  /* Check every member of A2's ring against every member of A1's
     ring for a live-range intersection.  */
  for (a = ALLOCNO_COALESCE_DATA (a2)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      for (conflict_a = ALLOCNO_COALESCE_DATA (a1)->next;;
           conflict_a = ALLOCNO_COALESCE_DATA (conflict_a)->next)
        {
          if (allocnos_conflict_by_live_ranges_p (a, conflict_a))
            return true;
          if (conflict_a == a1)
            break;
        }
      if (a == a2)
        break;
    }
  return false;
}
/* The major function for aggressive allocno coalescing.  We coalesce
   only spilled allocnos.  If some allocnos have been coalesced, we
   set up flag allocno_coalesced_p.  */
static void
coalesce_allocnos (void)
{
  ira_allocno_t a;
  ira_copy_t cp, next_cp;
  unsigned int j;
  int i, n, cp_num, regno;
  bitmap_iterator bi;

  cp_num = 0;
  /* Collect copies.  Only copies whose both sides are spilled
     allocnos without register equivalences are candidates.  */
  EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      regno = ALLOCNO_REGNO (a);
      if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
          || ira_equiv_no_lvalue_p (regno))
        continue;
      for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
        {
          if (cp->first == a)
            {
              next_cp = cp->next_first_allocno_copy;
              regno = ALLOCNO_REGNO (cp->second);
              /* For priority coloring we coalesce allocnos only with
                 the same allocno class not with intersected allocno
                 classes as it were possible.  It is done for
                 simplicity.  */
              if ((cp->insn != NULL || cp->constraint_p)
                  && ALLOCNO_ASSIGNED_P (cp->second)
                  && ALLOCNO_HARD_REGNO (cp->second) < 0
                  && ! ira_equiv_no_lvalue_p (regno))
                sorted_copies[cp_num++] = cp;
            }
          else if (cp->second == a)
            next_cp = cp->next_second_allocno_copy;
          else
            gcc_unreachable ();
        }
    }
  qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
  /* Coalesced copies, most frequently executed first.  */
  for (; cp_num != 0;)
    {
      /* Find and perform the first non-conflicting merge in the
         sorted order; I ends just past the merged copy.  */
      for (i = 0; i < cp_num; i++)
        {
          cp = sorted_copies[i];
          if (! coalesced_allocno_conflict_p (cp->first, cp->second))
            {
              allocno_coalesced_p = true;
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                fprintf
                  (ira_dump_file,
                   " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
                   cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
                   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
                   cp->freq);
              merge_allocnos (cp->first, cp->second);
              i++;
              break;
            }
        }
      /* Collect the rest of copies.  Copies whose two sides now
         belong to the same coalesced set are dropped.  */
      for (n = 0; i < cp_num; i++)
        {
          cp = sorted_copies[i];
          if (allocno_coalesce_data[ALLOCNO_NUM (cp->first)].first
              != allocno_coalesce_data[ALLOCNO_NUM (cp->second)].first)
            sorted_copies[n++] = cp;
        }
      cp_num = n;
    }
}
/* Usage cost and order number of coalesced allocno set to which
   given pseudo register belongs to.  */
static int *regno_coalesced_allocno_cost;
static int *regno_coalesced_allocno_num;

/* qsort comparison function for pseudo registers: order by
   decreasing cost of the coalesced allocno set a pseudo belongs to,
   then by the set's order number, and finally by regno so the sort
   result leaves nothing to chance.  */
static int
coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int cost_diff = (regno_coalesced_allocno_cost[regno2]
                   - regno_coalesced_allocno_cost[regno1]);

  if (cost_diff != 0)
    return cost_diff;

  int num_diff = (regno_coalesced_allocno_num[regno1]
                  - regno_coalesced_allocno_num[regno2]);
  if (num_diff != 0)
    return num_diff;

  return regno1 - regno2;
}
/* Widest width in which each pseudo reg is referred to (via subreg).
   It is used for sorting pseudo registers.  */
static unsigned int *regno_max_ref_width;

/* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1.  The target may
   define it as an arbitrary truthy expression; normalizing to 0/1
   lets it be compared directly (see coalesced_pseudo_reg_slot_compare).  */
#ifdef STACK_GROWS_DOWNWARD
# undef STACK_GROWS_DOWNWARD
# define STACK_GROWS_DOWNWARD 1
#else
# define STACK_GROWS_DOWNWARD 0
#endif
/* Sort pseudos according their slot numbers (putting ones with
   smaller numbers first, or last when the frame pointer is not
   needed).  Pseudos without a spill slot sort after spilled ones, by
   regno; spilled pseudos with equal slot numbers sort by decreasing
   total size, then by regno.  */
static int
coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  ira_allocno_t a1 = ira_regno_allocno_map[regno1];
  ira_allocno_t a2 = ira_regno_allocno_map[regno2];
  int diff, slot_num1, slot_num2;
  int total_size1, total_size2;

  /* An allocno with ALLOCNO_HARD_REGNO >= 0 (or no allocno at all)
     has no spill slot; such pseudos go after all spilled ones.  */
  if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
    {
      if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
        return regno1 - regno2;
      return 1;
    }
  else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
    return -1;
  /* For spilled allocnos, ALLOCNO_HARD_REGNO holds the negated slot
     number (it is negative, see the checks above).  */
  slot_num1 = -ALLOCNO_HARD_REGNO (a1);
  slot_num2 = -ALLOCNO_HARD_REGNO (a2);
  if ((diff = slot_num1 - slot_num2) != 0)
    /* NOTE(review): "!FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD"
       parses as "(!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD",
       i.e. it is true when frame and stack grow in opposite
       directions -- assuming FRAME_GROWS_DOWNWARD evaluates to 0 or 1
       like the normalized STACK_GROWS_DOWNWARD; confirm for targets
       defining it as another non-zero value.  */
    return (frame_pointer_needed
            || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
  total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1),
                     regno_max_ref_width[regno1]);
  total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2),
                     regno_max_ref_width[regno2]);
  if ((diff = total_size2 - total_size1) != 0)
    return diff;
  return regno1 - regno2;
}
/* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
   for coalesced allocno sets containing allocnos with their regnos
   given in array PSEUDO_REGNOS of length N.  The cost of a set is
   the sum of the frequencies of its member allocnos; every member of
   a set receives the same cost and order number.  */
static void
setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
{
  int i, num, regno, cost;
  ira_allocno_t allocno, a;

  for (num = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      allocno = ira_regno_allocno_map[regno];
      /* A pseudo without an allocno forms its own trivial "set" with
         zero cost.  */
      if (allocno == NULL)
        {
          regno_coalesced_allocno_cost[regno] = 0;
          regno_coalesced_allocno_num[regno] = ++num;
          continue;
        }
      /* Handle each coalesced set once, via its representative.  */
      if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
        continue;
      num++;
      /* First walk of the ring: accumulate the total frequency.  */
      for (cost = 0, a = ALLOCNO_COALESCE_DATA (allocno)->next;;
           a = ALLOCNO_COALESCE_DATA (a)->next)
        {
          cost += ALLOCNO_FREQ (a);
          if (a == allocno)
            break;
        }
      /* Second walk: record the shared cost and order number for
         every member's regno.  */
      for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
           a = ALLOCNO_COALESCE_DATA (a)->next)
        {
          regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
          regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
          if (a == allocno)
            break;
        }
    }
}
3904 /* Collect spilled allocnos representing coalesced allocno sets (the
3905 first coalesced allocno). The collected allocnos are returned
3906 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
3907 number of the collected allocnos. The allocnos are given by their
3908 regnos in array PSEUDO_REGNOS of length N. */
3909 static int
3910 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
3911 ira_allocno_t *spilled_coalesced_allocnos)
3913 int i, num, regno;
3914 ira_allocno_t allocno;
3916 for (num = i = 0; i < n; i++)
3918 regno = pseudo_regnos[i];
3919 allocno = ira_regno_allocno_map[regno];
3920 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
3921 || ALLOCNO_COALESCE_DATA (allocno)->first != allocno)
3922 continue;
3923 spilled_coalesced_allocnos[num++] = allocno;
3925 return num;
/* Array of live ranges of size IRA_ALLOCNOS_NUM.  Live range for
   given slot contains live ranges of coalesced allocnos assigned to
   given slot.  */
static live_range_t *slot_coalesced_allocnos_live_ranges;

/* Return TRUE if coalesced allocnos represented by ALLOCNO has live
   ranges intersected with live ranges of coalesced allocnos assigned
   to slot with number N.  */
static bool
slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
{
  ira_allocno_t a;

  /* Walk the cyclic ring of allocnos coalesced with ALLOCNO and test
     every object of every member against the slot's ranges.  */
  for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
       a = ALLOCNO_COALESCE_DATA (a)->next)
    {
      int i;
      int nr = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < nr; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);

          if (ira_live_ranges_intersect_p
              (slot_coalesced_allocnos_live_ranges[n],
               OBJECT_LIVE_RANGES (obj)))
            return true;
        }
      if (a == allocno)
        break;
    }
  return false;
}
3962 /* Update live ranges of slot to which coalesced allocnos represented
3963 by ALLOCNO were assigned. */
3964 static void
3965 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
3967 int i, n;
3968 ira_allocno_t a;
3969 live_range_t r;
3971 n = ALLOCNO_COALESCE_DATA (allocno)->temp;
3972 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
3973 a = ALLOCNO_COALESCE_DATA (a)->next)
3975 int nr = ALLOCNO_NUM_OBJECTS (a);
3976 for (i = 0; i < nr; i++)
3978 ira_object_t obj = ALLOCNO_OBJECT (a, i);
3980 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
3981 slot_coalesced_allocnos_live_ranges[n]
3982 = ira_merge_live_ranges
3983 (slot_coalesced_allocnos_live_ranges[n], r);
3985 if (a == allocno)
3986 break;
3990 /* We have coalesced allocnos involving in copies. Coalesce allocnos
3991 further in order to share the same memory stack slot. Allocnos
3992 representing sets of allocnos coalesced before the call are given
3993 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
3994 some allocnos were coalesced in the function. */
3995 static bool
3996 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
3998 int i, j, n, last_coalesced_allocno_num;
3999 ira_allocno_t allocno, a;
4000 bool merged_p = false;
4001 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
4003 slot_coalesced_allocnos_live_ranges
4004 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
4005 memset (slot_coalesced_allocnos_live_ranges, 0,
4006 sizeof (live_range_t) * ira_allocnos_num);
4007 last_coalesced_allocno_num = 0;
4008 /* Coalesce non-conflicting spilled allocnos preferring most
4009 frequently used. */
4010 for (i = 0; i < num; i++)
4012 allocno = spilled_coalesced_allocnos[i];
4013 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4014 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
4015 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4016 continue;
4017 for (j = 0; j < i; j++)
4019 a = spilled_coalesced_allocnos[j];
4020 n = ALLOCNO_COALESCE_DATA (a)->temp;
4021 if (ALLOCNO_COALESCE_DATA (a)->first == a
4022 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
4023 && ! ira_equiv_no_lvalue_p (ALLOCNO_REGNO (a))
4024 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
4025 break;
4027 if (j >= i)
4029 /* No coalescing: set up number for coalesced allocnos
4030 represented by ALLOCNO. */
4031 ALLOCNO_COALESCE_DATA (allocno)->temp = last_coalesced_allocno_num++;
4032 setup_slot_coalesced_allocno_live_ranges (allocno);
4034 else
4036 allocno_coalesced_p = true;
4037 merged_p = true;
4038 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4039 fprintf (ira_dump_file,
4040 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
4041 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
4042 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
4043 ALLOCNO_COALESCE_DATA (allocno)->temp
4044 = ALLOCNO_COALESCE_DATA (a)->temp;
4045 setup_slot_coalesced_allocno_live_ranges (allocno);
4046 merge_allocnos (a, allocno);
4047 ira_assert (ALLOCNO_COALESCE_DATA (a)->first == a);
4050 for (i = 0; i < ira_allocnos_num; i++)
4051 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
4052 ira_free (slot_coalesced_allocnos_live_ranges);
4053 return merged_p;
4056 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N for
4057 subsequent assigning stack slots to them in the reload pass. To do
4058 this we coalesce spilled allocnos first to decrease the number of
4059 memory-memory move insns. This function is called by the
4060 reload. */
4061 void
4062 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
4063 unsigned int *reg_max_ref_width)
4065 int max_regno = max_reg_num ();
4066 int i, regno, num, slot_num;
4067 ira_allocno_t allocno, a;
4068 ira_allocno_iterator ai;
4069 ira_allocno_t *spilled_coalesced_allocnos;
4071 /* Set up allocnos can be coalesced. */
4072 coloring_allocno_bitmap = ira_allocate_bitmap ();
4073 for (i = 0; i < n; i++)
4075 regno = pseudo_regnos[i];
4076 allocno = ira_regno_allocno_map[regno];
4077 if (allocno != NULL)
4078 bitmap_set_bit (coloring_allocno_bitmap, ALLOCNO_NUM (allocno));
4080 allocno_coalesced_p = false;
4081 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
4082 allocno_coalesce_data
4083 = (coalesce_data_t) ira_allocate (sizeof (struct coalesce_data)
4084 * ira_allocnos_num);
4085 /* Initialize coalesce data for allocnos. */
4086 FOR_EACH_ALLOCNO (a, ai)
4088 ALLOCNO_ADD_DATA (a) = allocno_coalesce_data + ALLOCNO_NUM (a);
4089 ALLOCNO_COALESCE_DATA (a)->first = a;
4090 ALLOCNO_COALESCE_DATA (a)->next = a;
4092 coalesce_allocnos ();
4093 ira_free_bitmap (coloring_allocno_bitmap);
4094 regno_coalesced_allocno_cost
4095 = (int *) ira_allocate (max_regno * sizeof (int));
4096 regno_coalesced_allocno_num
4097 = (int *) ira_allocate (max_regno * sizeof (int));
4098 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
4099 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4100 /* Sort regnos according frequencies of the corresponding coalesced
4101 allocno sets. */
4102 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
4103 spilled_coalesced_allocnos
4104 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
4105 * sizeof (ira_allocno_t));
4106 /* Collect allocnos representing the spilled coalesced allocno
4107 sets. */
4108 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4109 spilled_coalesced_allocnos);
4110 if (flag_ira_share_spill_slots
4111 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
4113 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
4114 qsort (pseudo_regnos, n, sizeof (int),
4115 coalesced_pseudo_reg_freq_compare);
4116 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
4117 spilled_coalesced_allocnos);
4119 ira_free_bitmap (processed_coalesced_allocno_bitmap);
4120 allocno_coalesced_p = false;
4121 /* Assign stack slot numbers to spilled allocno sets, use smaller
4122 numbers for most frequently used coalesced allocnos. -1 is
4123 reserved for dynamic search of stack slots for pseudos spilled by
4124 the reload. */
4125 slot_num = 1;
4126 for (i = 0; i < num; i++)
4128 allocno = spilled_coalesced_allocnos[i];
4129 if (ALLOCNO_COALESCE_DATA (allocno)->first != allocno
4130 || ALLOCNO_HARD_REGNO (allocno) >= 0
4131 || ira_equiv_no_lvalue_p (ALLOCNO_REGNO (allocno)))
4132 continue;
4133 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4134 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
4135 slot_num++;
4136 for (a = ALLOCNO_COALESCE_DATA (allocno)->next;;
4137 a = ALLOCNO_COALESCE_DATA (a)->next)
4139 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
4140 ALLOCNO_HARD_REGNO (a) = -slot_num;
4141 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4142 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
4143 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
4144 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
4145 reg_max_ref_width[ALLOCNO_REGNO (a)]));
4147 if (a == allocno)
4148 break;
4150 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4151 fprintf (ira_dump_file, "\n");
4153 ira_spilled_reg_stack_slots_num = slot_num - 1;
4154 ira_free (spilled_coalesced_allocnos);
4155 /* Sort regnos according the slot numbers. */
4156 regno_max_ref_width = reg_max_ref_width;
4157 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
4158 FOR_EACH_ALLOCNO (a, ai)
4159 ALLOCNO_ADD_DATA (a) = NULL;
4160 ira_free (allocno_coalesce_data);
4161 ira_free (regno_coalesced_allocno_num);
4162 ira_free (regno_coalesced_allocno_cost);
/* This page contains code used by the reload pass to improve the
   final code.  */
4170 /* The function is called from reload to mark changes in the
4171 allocation of REGNO made by the reload. Remember that reg_renumber
4172 reflects the change result. */
4173 void
4174 ira_mark_allocation_change (int regno)
4176 ira_allocno_t a = ira_regno_allocno_map[regno];
4177 int old_hard_regno, hard_regno, cost;
4178 enum reg_class aclass = ALLOCNO_CLASS (a);
4180 ira_assert (a != NULL);
4181 hard_regno = reg_renumber[regno];
4182 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
4183 return;
4184 if (old_hard_regno < 0)
4185 cost = -ALLOCNO_MEMORY_COST (a);
4186 else
4188 ira_assert (ira_class_hard_reg_index[aclass][old_hard_regno] >= 0);
4189 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
4190 ? ALLOCNO_CLASS_COST (a)
4191 : ALLOCNO_HARD_REG_COSTS (a)
4192 [ira_class_hard_reg_index[aclass][old_hard_regno]]);
4193 update_costs_from_copies (a, false, false);
4195 ira_overall_cost -= cost;
4196 ALLOCNO_HARD_REGNO (a) = hard_regno;
4197 if (hard_regno < 0)
4199 ALLOCNO_HARD_REGNO (a) = -1;
4200 cost += ALLOCNO_MEMORY_COST (a);
4202 else if (ira_class_hard_reg_index[aclass][hard_regno] >= 0)
4204 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
4205 ? ALLOCNO_CLASS_COST (a)
4206 : ALLOCNO_HARD_REG_COSTS (a)
4207 [ira_class_hard_reg_index[aclass][hard_regno]]);
4208 update_costs_from_copies (a, true, false);
4210 else
4211 /* Reload changed class of the allocno. */
4212 cost = 0;
4213 ira_overall_cost += cost;
4216 /* This function is called when reload deletes memory-memory move. In
4217 this case we marks that the allocation of the corresponding
4218 allocnos should be not changed in future. Otherwise we risk to get
4219 a wrong code. */
4220 void
4221 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
4223 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
4224 ira_allocno_t src = ira_regno_allocno_map[src_regno];
4226 ira_assert (dst != NULL && src != NULL
4227 && ALLOCNO_HARD_REGNO (dst) < 0
4228 && ALLOCNO_HARD_REGNO (src) < 0);
4229 ALLOCNO_DONT_REASSIGN_P (dst) = true;
4230 ALLOCNO_DONT_REASSIGN_P (src) = true;
4233 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
4234 allocno A and return TRUE in the case of success. */
4235 static bool
4236 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
4238 int hard_regno;
4239 enum reg_class aclass;
4240 int regno = ALLOCNO_REGNO (a);
4241 HARD_REG_SET saved[2];
4242 int i, n;
4244 n = ALLOCNO_NUM_OBJECTS (a);
4245 for (i = 0; i < n; i++)
4247 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4248 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
4249 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
4250 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
4251 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
4252 call_used_reg_set);
4254 ALLOCNO_ASSIGNED_P (a) = false;
4255 aclass = ALLOCNO_CLASS (a);
4256 update_curr_costs (a);
4257 assign_hard_reg (a, true);
4258 hard_regno = ALLOCNO_HARD_REGNO (a);
4259 reg_renumber[regno] = hard_regno;
4260 if (hard_regno < 0)
4261 ALLOCNO_HARD_REGNO (a) = -1;
4262 else
4264 ira_assert (ira_class_hard_reg_index[aclass][hard_regno] >= 0);
4265 ira_overall_cost
4266 -= (ALLOCNO_MEMORY_COST (a)
4267 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
4268 ? ALLOCNO_CLASS_COST (a)
4269 : ALLOCNO_HARD_REG_COSTS (a)[ira_class_hard_reg_index
4270 [aclass][hard_regno]]));
4271 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
4272 && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
4273 call_used_reg_set))
4275 ira_assert (flag_caller_saves);
4276 caller_save_needed = 1;
4280 /* If we found a hard register, modify the RTL for the pseudo
4281 register to show the hard register, and mark the pseudo register
4282 live. */
4283 if (reg_renumber[regno] >= 0)
4285 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4286 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
4287 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
4288 mark_home_live (regno);
4290 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4291 fprintf (ira_dump_file, "\n");
4292 for (i = 0; i < n; i++)
4294 ira_object_t obj = ALLOCNO_OBJECT (a, i);
4295 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
4297 return reg_renumber[regno] >= 0;
/* qsort comparison function: sort pseudos according to their usage
   frequencies (putting most frequently used ones first), breaking
   ties by regno for a stable, deterministic order.  */
static int
pseudo_reg_compare (const void *v1p, const void *v2p)
{
  int regno1 = *(const int *) v1p;
  int regno2 = *(const int *) v2p;
  int diff;

  if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
    return diff;
  return regno1 - regno2;
}
4314 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
4315 NUM of them) or spilled pseudos conflicting with pseudos in
4316 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
4317 allocation has been changed. The function doesn't use
4318 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
4319 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
4320 is called by the reload pass at the end of each reload
4321 iteration. */
4322 bool
4323 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
4324 HARD_REG_SET bad_spill_regs,
4325 HARD_REG_SET *pseudo_forbidden_regs,
4326 HARD_REG_SET *pseudo_previous_regs,
4327 bitmap spilled)
4329 int i, n, regno;
4330 bool changed_p;
4331 ira_allocno_t a;
4332 HARD_REG_SET forbidden_regs;
4333 bitmap temp = BITMAP_ALLOC (NULL);
4335 /* Add pseudos which conflict with pseudos already in
4336 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
4337 to allocating in two steps as some of the conflicts might have
4338 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
4339 for (i = 0; i < num; i++)
4340 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
4342 for (i = 0, n = num; i < n; i++)
4344 int nr, j;
4345 int regno = spilled_pseudo_regs[i];
4346 bitmap_set_bit (temp, regno);
4348 a = ira_regno_allocno_map[regno];
4349 nr = ALLOCNO_NUM_OBJECTS (a);
4350 for (j = 0; j < nr; j++)
4352 ira_object_t conflict_obj;
4353 ira_object_t obj = ALLOCNO_OBJECT (a, j);
4354 ira_object_conflict_iterator oci;
4356 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
4358 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
4359 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
4360 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
4361 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
4363 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
4364 /* ?!? This seems wrong. */
4365 bitmap_set_bit (consideration_allocno_bitmap,
4366 ALLOCNO_NUM (conflict_a));
4372 if (num > 1)
4373 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
4374 changed_p = false;
4375 /* Try to assign hard registers to pseudos from
4376 SPILLED_PSEUDO_REGS. */
4377 for (i = 0; i < num; i++)
4379 regno = spilled_pseudo_regs[i];
4380 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
4381 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
4382 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
4383 gcc_assert (reg_renumber[regno] < 0);
4384 a = ira_regno_allocno_map[regno];
4385 ira_mark_allocation_change (regno);
4386 ira_assert (reg_renumber[regno] < 0);
4387 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
4388 fprintf (ira_dump_file,
4389 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
4390 ALLOCNO_MEMORY_COST (a)
4391 - ALLOCNO_CLASS_COST (a));
4392 allocno_reload_assign (a, forbidden_regs);
4393 if (reg_renumber[regno] >= 0)
4395 CLEAR_REGNO_REG_SET (spilled, regno);
4396 changed_p = true;
4399 BITMAP_FREE (temp);
4400 return changed_p;
4403 /* The function is called by reload and returns already allocated
4404 stack slot (if any) for REGNO with given INHERENT_SIZE and
4405 TOTAL_SIZE. In the case of failure to find a slot which can be
4406 used for REGNO, the function returns NULL. */
4408 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
4409 unsigned int total_size)
4411 unsigned int i;
4412 int slot_num, best_slot_num;
4413 int cost, best_cost;
4414 ira_copy_t cp, next_cp;
4415 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
4416 rtx x;
4417 bitmap_iterator bi;
4418 struct ira_spilled_reg_stack_slot *slot = NULL;
4420 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
4421 && inherent_size <= total_size
4422 && ALLOCNO_HARD_REGNO (allocno) < 0);
4423 if (! flag_ira_share_spill_slots)
4424 return NULL_RTX;
4425 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4426 if (slot_num != -1)
4428 slot = &ira_spilled_reg_stack_slots[slot_num];
4429 x = slot->mem;
4431 else
4433 best_cost = best_slot_num = -1;
4434 x = NULL_RTX;
4435 /* It means that the pseudo was spilled in the reload pass, try
4436 to reuse a slot. */
4437 for (slot_num = 0;
4438 slot_num < ira_spilled_reg_stack_slots_num;
4439 slot_num++)
4441 slot = &ira_spilled_reg_stack_slots[slot_num];
4442 if (slot->mem == NULL_RTX)
4443 continue;
4444 if (slot->width < total_size
4445 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
4446 continue;
4448 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4449 FIRST_PSEUDO_REGISTER, i, bi)
4451 another_allocno = ira_regno_allocno_map[i];
4452 if (allocnos_conflict_by_live_ranges_p (allocno,
4453 another_allocno))
4454 goto cont;
4456 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
4457 cp != NULL;
4458 cp = next_cp)
4460 if (cp->first == allocno)
4462 next_cp = cp->next_first_allocno_copy;
4463 another_allocno = cp->second;
4465 else if (cp->second == allocno)
4467 next_cp = cp->next_second_allocno_copy;
4468 another_allocno = cp->first;
4470 else
4471 gcc_unreachable ();
4472 if (cp->insn == NULL_RTX)
4473 continue;
4474 if (bitmap_bit_p (&slot->spilled_regs,
4475 ALLOCNO_REGNO (another_allocno)))
4476 cost += cp->freq;
4478 if (cost > best_cost)
4480 best_cost = cost;
4481 best_slot_num = slot_num;
4483 cont:
4486 if (best_cost >= 0)
4488 slot_num = best_slot_num;
4489 slot = &ira_spilled_reg_stack_slots[slot_num];
4490 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4491 x = slot->mem;
4492 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4495 if (x != NULL_RTX)
4497 ira_assert (slot->width >= total_size);
4498 #ifdef ENABLE_IRA_CHECKING
4499 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4500 FIRST_PSEUDO_REGISTER, i, bi)
4502 ira_assert (! conflict_by_live_ranges_p (regno, i));
4504 #endif
4505 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4506 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4508 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
4509 regno, REG_FREQ (regno), slot_num);
4510 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
4511 FIRST_PSEUDO_REGISTER, i, bi)
4513 if ((unsigned) regno != i)
4514 fprintf (ira_dump_file, " %d", i);
4516 fprintf (ira_dump_file, "\n");
4519 return x;
4522 /* This is called by reload every time a new stack slot X with
4523 TOTAL_SIZE was allocated for REGNO. We store this info for
4524 subsequent ira_reuse_stack_slot calls. */
4525 void
4526 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
4528 struct ira_spilled_reg_stack_slot *slot;
4529 int slot_num;
4530 ira_allocno_t allocno;
4532 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
4533 allocno = ira_regno_allocno_map[regno];
4534 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
4535 if (slot_num == -1)
4537 slot_num = ira_spilled_reg_stack_slots_num++;
4538 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
4540 slot = &ira_spilled_reg_stack_slots[slot_num];
4541 INIT_REG_SET (&slot->spilled_regs);
4542 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
4543 slot->mem = x;
4544 slot->width = total_size;
4545 if (internal_flag_ira_verbose > 3 && ira_dump_file)
4546 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
4547 regno, REG_FREQ (regno), slot_num);
4551 /* Return spill cost for pseudo-registers whose numbers are in array
4552 REGNOS (with a negative number as an end marker) for reload with
4553 given IN and OUT for INSN. Return also number points (through
4554 EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-register lives and
4555 the register pressure is high, number of references of the
4556 pseudo-registers (through NREFS), number of callee-clobbered
4557 hard-registers occupied by the pseudo-registers (through
4558 CALL_USED_COUNT), and the first hard regno occupied by the
4559 pseudo-registers (through FIRST_HARD_REGNO). */
4560 static int
4561 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
4562 int *excess_pressure_live_length,
4563 int *nrefs, int *call_used_count, int *first_hard_regno)
4565 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
4566 bool in_p, out_p;
4567 int length;
4568 ira_allocno_t a;
4570 *nrefs = 0;
4571 for (length = count = cost = i = 0;; i++)
4573 regno = regnos[i];
4574 if (regno < 0)
4575 break;
4576 *nrefs += REG_N_REFS (regno);
4577 hard_regno = reg_renumber[regno];
4578 ira_assert (hard_regno >= 0);
4579 a = ira_regno_allocno_map[regno];
4580 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
4581 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_CLASS_COST (a);
4582 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
4583 for (j = 0; j < nregs; j++)
4584 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
4585 break;
4586 if (j == nregs)
4587 count++;
4588 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
4589 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
4590 if ((in_p || out_p)
4591 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
4593 saved_cost = 0;
4594 if (in_p)
4595 saved_cost += ira_memory_move_cost
4596 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][1];
4597 if (out_p)
4598 saved_cost
4599 += ira_memory_move_cost
4600 [ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)][0];
4601 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
4604 *excess_pressure_live_length = length;
4605 *call_used_count = count;
4606 hard_regno = -1;
4607 if (regnos[0] >= 0)
4609 hard_regno = reg_renumber[regnos[0]];
4611 *first_hard_regno = hard_regno;
4612 return cost;
4615 /* Return TRUE if spilling pseudo-registers whose numbers are in array
4616 REGNOS is better than spilling pseudo-registers with numbers in
4617 OTHER_REGNOS for reload with given IN and OUT for INSN. The
4618 function used by the reload pass to make better register spilling
4619 decisions. */
4620 bool
4621 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
4622 rtx in, rtx out, rtx insn)
4624 int cost, other_cost;
4625 int length, other_length;
4626 int nrefs, other_nrefs;
4627 int call_used_count, other_call_used_count;
4628 int hard_regno, other_hard_regno;
4630 cost = calculate_spill_cost (regnos, in, out, insn,
4631 &length, &nrefs, &call_used_count, &hard_regno);
4632 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
4633 &other_length, &other_nrefs,
4634 &other_call_used_count,
4635 &other_hard_regno);
4636 if (nrefs == 0 && other_nrefs != 0)
4637 return true;
4638 if (nrefs != 0 && other_nrefs == 0)
4639 return false;
4640 if (cost != other_cost)
4641 return cost < other_cost;
4642 if (length != other_length)
4643 return length > other_length;
4644 #ifdef REG_ALLOC_ORDER
4645 if (hard_regno >= 0 && other_hard_regno >= 0)
4646 return (inv_reg_alloc_order[hard_regno]
4647 < inv_reg_alloc_order[other_hard_regno]);
4648 #else
4649 if (call_used_count != other_call_used_count)
4650 return call_used_count > other_call_used_count;
4651 #endif
4652 return false;
4657 /* Allocate and initialize data necessary for assign_hard_reg. */
4658 void
4659 ira_initiate_assign (void)
4661 sorted_allocnos
4662 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4663 * ira_allocnos_num);
4664 consideration_allocno_bitmap = ira_allocate_bitmap ();
4665 initiate_cost_update ();
4666 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4667 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
4668 * sizeof (ira_copy_t));
4671 /* Deallocate data used by assign_hard_reg. */
4672 void
4673 ira_finish_assign (void)
4675 ira_free (sorted_allocnos);
4676 ira_free_bitmap (consideration_allocno_bitmap);
4677 finish_cost_update ();
4678 ira_free (allocno_priorities);
4679 ira_free (sorted_copies);
4684 /* Entry function doing color-based register allocation. */
4685 static void
4686 color (void)
4688 allocno_stack_vec.create (ira_allocnos_num);
4689 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
4690 ira_initiate_assign ();
4691 do_coloring ();
4692 ira_finish_assign ();
4693 allocno_stack_vec.release ();
4694 move_spill_restore ();
/* This page contains a simple register allocator without usage of
   allocno conflicts.  This is used for fast allocation for -O0.  */
4702 /* Do register allocation by not using allocno conflicts. It uses
4703 only allocno live ranges. The algorithm is close to Chow's
4704 priority coloring. */
4705 static void
4706 fast_allocation (void)
4708 int i, j, k, num, class_size, hard_regno;
4709 #ifdef STACK_REGS
4710 bool no_stack_reg_p;
4711 #endif
4712 enum reg_class aclass;
4713 enum machine_mode mode;
4714 ira_allocno_t a;
4715 ira_allocno_iterator ai;
4716 live_range_t r;
4717 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
4719 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
4720 * ira_allocnos_num);
4721 num = 0;
4722 FOR_EACH_ALLOCNO (a, ai)
4723 sorted_allocnos[num++] = a;
4724 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
4725 setup_allocno_priorities (sorted_allocnos, num);
4726 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
4727 * ira_max_point);
4728 for (i = 0; i < ira_max_point; i++)
4729 CLEAR_HARD_REG_SET (used_hard_regs[i]);
4730 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
4731 allocno_priority_compare_func);
4732 for (i = 0; i < num; i++)
4734 int nr, l;
4736 a = sorted_allocnos[i];
4737 nr = ALLOCNO_NUM_OBJECTS (a);
4738 CLEAR_HARD_REG_SET (conflict_hard_regs);
4739 for (l = 0; l < nr; l++)
4741 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4742 IOR_HARD_REG_SET (conflict_hard_regs,
4743 OBJECT_CONFLICT_HARD_REGS (obj));
4744 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4745 for (j = r->start; j <= r->finish; j++)
4746 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
4748 aclass = ALLOCNO_CLASS (a);
4749 ALLOCNO_ASSIGNED_P (a) = true;
4750 ALLOCNO_HARD_REGNO (a) = -1;
4751 if (hard_reg_set_subset_p (reg_class_contents[aclass],
4752 conflict_hard_regs))
4753 continue;
4754 mode = ALLOCNO_MODE (a);
4755 #ifdef STACK_REGS
4756 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
4757 #endif
4758 class_size = ira_class_hard_regs_num[aclass];
4759 for (j = 0; j < class_size; j++)
4761 hard_regno = ira_class_hard_regs[aclass][j];
4762 #ifdef STACK_REGS
4763 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
4764 && hard_regno <= LAST_STACK_REG)
4765 continue;
4766 #endif
4767 if (ira_hard_reg_set_intersection_p (hard_regno, mode, conflict_hard_regs)
4768 || (TEST_HARD_REG_BIT
4769 (ira_prohibited_class_mode_regs[aclass][mode], hard_regno)))
4770 continue;
4771 ALLOCNO_HARD_REGNO (a) = hard_regno;
4772 for (l = 0; l < nr; l++)
4774 ira_object_t obj = ALLOCNO_OBJECT (a, l);
4775 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
4776 for (k = r->start; k <= r->finish; k++)
4777 IOR_HARD_REG_SET (used_hard_regs[k],
4778 ira_reg_mode_hard_regset[hard_regno][mode]);
4780 break;
4783 ira_free (sorted_allocnos);
4784 ira_free (used_hard_regs);
4785 ira_free (allocno_priorities);
4786 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
4787 ira_print_disposition (ira_dump_file);
4792 /* Entry function doing coloring. */
4793 void
4794 ira_color (void)
4796 ira_allocno_t a;
4797 ira_allocno_iterator ai;
4799 /* Setup updated costs. */
4800 FOR_EACH_ALLOCNO (a, ai)
4802 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
4803 ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
4805 if (ira_conflicts_p)
4806 color ();
4807 else
4808 fast_allocation ();