/* IRA allocation based on graph coloring.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "target.h"
#include "regs.h"
#include "flags.h"
#include "sbitmap.h"
#include "bitmap.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "expr.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "reload.h"
#include "params.h"
#include "df.h"
#include "splay-tree.h"
#include "ira-int.h"

/* This file contains code for regional graph coloring, spill/restore
   code placement optimization, and code helping the reload pass to do
   a better job.  */

/* Bitmap of allocnos which should be colored.  */
static bitmap coloring_allocno_bitmap;

/* Bitmap of allocnos which should be taken into account during
   coloring.  In general case it contains allocnos from
   coloring_allocno_bitmap plus other already colored conflicting
   allocnos.  */
static bitmap consideration_allocno_bitmap;

/* TRUE if we coalesced some allocnos.  In other words, if we got
   loops formed by members first_coalesced_allocno and
   next_coalesced_allocno containing more than one allocno.  */
static bool allocno_coalesced_p;

/* Bitmap used to prevent a repeated allocno processing because of
   coalescing.  */
static bitmap processed_coalesced_allocno_bitmap;

/* All allocnos sorted according to their priorities.  */
static ira_allocno_t *sorted_allocnos;

/* Vec representing the stack of allocnos used during coloring.  */
static VEC(ira_allocno_t,heap) *allocno_stack_vec;

/* Array used to choose an allocno for spilling.  */
static ira_allocno_t *allocnos_for_spilling;

/* Pool for splay tree nodes.  */
static alloc_pool splay_tree_node_pool;

/* When an allocno is removed from the splay tree, it is put in the
   following vector for subsequent inserting it into the splay tree
   after putting all colorable allocnos onto the stack.  The allocno
   could be removed from and inserted to the splay tree every time
   when its spilling priority is changed, but such a solution would be
   more costly although simpler.  */
static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;

/* Helper for qsort comparison callbacks - return a positive integer if
   X > Y, or a negative value otherwise.  Use a conditional expression
   instead of a difference computation to insulate from possible overflow
   issues, e.g. X - Y < 0 for some X > 0 and Y < 0.  */
#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
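
/* Illustrative sketch (our annotation, not part of the allocator):
   why SORTGT is preferred over the difference idiom.  With operands
   near the int limits the subtraction wraps, e.g. for x = 2000000000
   and y = -2000000000 the expression x - y overflows, so a comparator
   returning x - y could claim x < y.  A conditional never overflows.  */
#if 0
static int
overflow_safe_compare_sketch (const void *xp, const void *yp)
{
  int x = *(const int *) xp;
  int y = *(const int *) yp;

  /* return x - y;  -- wrong: undefined behavior when the difference
     does not fit in int.  */
  return x == y ? 0 : SORTGT (x, y);
}
#endif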

/* This page contains functions used to find conflicts using allocno
   live ranges.  */

/* Return TRUE if live ranges of allocnos A1 and A2 intersect.  It is
   used to find a conflict for new allocnos or allocnos with the
   different cover classes.  */
static bool
allocnos_have_intersected_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
{
  int i, j;
  int n1 = ALLOCNO_NUM_OBJECTS (a1);
  int n2 = ALLOCNO_NUM_OBJECTS (a2);

  if (a1 == a2)
    return false;
  if (ALLOCNO_REG (a1) != NULL && ALLOCNO_REG (a2) != NULL
      && (ORIGINAL_REGNO (ALLOCNO_REG (a1))
          == ORIGINAL_REGNO (ALLOCNO_REG (a2))))
    return false;

  for (i = 0; i < n1; i++)
    {
      ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
      for (j = 0; j < n2; j++)
        {
          ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
          if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
                                           OBJECT_LIVE_RANGES (c2)))
            return true;
        }
    }
  return false;
}

#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
   intersect.  This should be used when there is only one region.
   Currently this is used during reload.  */
static bool
pseudos_have_intersected_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
              && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Reg info calculated by dataflow infrastructure can be different
     from one calculated by regclass.  */
  if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
      || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
    return false;
  return allocnos_have_intersected_live_ranges_p (a1, a2);
}

#endif

/* This page contains functions used to choose hard registers for
   allocnos.  */

/* Array whose element value is TRUE if the corresponding hard
   register was already allocated for an allocno.  */
static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];

/* Describes one element in a queue of allocnos whose costs need to be
   updated.  Each allocno in the queue is known to have a cover class.  */
struct update_cost_queue_elem
{
  /* This element is in the queue iff CHECK == update_cost_check.  */
  int check;

  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
     connecting this allocno to the one being allocated.  */
  int divisor;

  /* The next allocno in the queue, or null if this is the last element.  */
  ira_allocno_t next;
};

/* The first element in a queue of allocnos whose copy costs need to be
   updated.  Null if the queue is empty.  */
static ira_allocno_t update_cost_queue;

/* The last element in the queue described by update_cost_queue.
   Not valid if update_cost_queue is null.  */
static struct update_cost_queue_elem *update_cost_queue_tail;

/* A pool of elements in the queue described by update_cost_queue.
   Elements are indexed by ALLOCNO_NUM.  */
static struct update_cost_queue_elem *update_cost_queue_elems;

/* The current value of the update_copy_costs call count.  */
static int update_cost_check;

/* Allocate and initialize data necessary for function
   update_copy_costs.  */
static void
initiate_cost_update (void)
{
  size_t size;

  size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
  update_cost_queue_elems
    = (struct update_cost_queue_elem *) ira_allocate (size);
  memset (update_cost_queue_elems, 0, size);
  update_cost_check = 0;
}

/* Deallocate data used by function update_copy_costs.  */
static void
finish_cost_update (void)
{
  ira_free (update_cost_queue_elems);
}

/* When we traverse allocnos to update hard register costs, the cost
   divisor will be multiplied by the following macro value for each
   hop from given allocno to directly connected allocnos.  */
#define COST_HOP_DIVISOR 4
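
/* Worked numbers (our annotation): in the traversals below a copy of
   frequency 100 and move cost 2 contributes an update of 200 at the
   allocno itself (divisor 1), 200 / 4 = 50 one hop away, and
   200 / 16 = 12 two hops away; the integer division soon drives the
   update to zero, which naturally bounds the propagation.  */
#if 0
int hop0 = 100 * 2 / 1;                                     /* 200 */
int hop1 = 100 * 2 / COST_HOP_DIVISOR;                      /* 50 */
int hop2 = 100 * 2 / (COST_HOP_DIVISOR * COST_HOP_DIVISOR); /* 12 */
#endif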

/* Start a new cost-updating pass.  */
static void
start_update_cost (void)
{
  update_cost_check++;
  update_cost_queue = NULL;
}

/* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue,
   unless ALLOCNO is already in the queue, or has no cover class.  */
static inline void
queue_update_cost (ira_allocno_t allocno, int divisor)
{
  struct update_cost_queue_elem *elem;

  elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
  if (elem->check != update_cost_check
      && ALLOCNO_COVER_CLASS (allocno) != NO_REGS)
    {
      elem->check = update_cost_check;
      elem->divisor = divisor;
      elem->next = NULL;
      if (update_cost_queue == NULL)
        update_cost_queue = allocno;
      else
        update_cost_queue_tail->next = allocno;
      update_cost_queue_tail = elem;
    }
}

/* Try to remove the first element from update_cost_queue.  Return false
   if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
   the removed element.  */
static inline bool
get_next_update_cost (ira_allocno_t *allocno, int *divisor)
{
  struct update_cost_queue_elem *elem;

  if (update_cost_queue == NULL)
    return false;

  *allocno = update_cost_queue;
  elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
  *divisor = elem->divisor;
  update_cost_queue = elem->next;
  return true;
}
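
/* A minimal sketch of the calling protocol for the queue above (our
   annotation; the real call sites are update_copy_costs and
   update_conflict_hard_regno_costs below).  NEIGHBOR stands for an
   allocno found by walking the copies of A and is hypothetical.  */
#if 0
static void
update_cost_queue_usage_sketch (ira_allocno_t seed, ira_allocno_t neighbor)
{
  ira_allocno_t a;
  int divisor;

  start_update_cost ();
  queue_update_cost (seed, 1);
  while (get_next_update_cost (&a, &divisor))
    {
      /* ... adjust costs of A by some amount divided by DIVISOR,
         then push its copy-connected neighbors one hop further.  */
      queue_update_cost (neighbor, divisor * COST_HOP_DIVISOR);
    }
}
#endif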

/* Update the costs of allocnos to increase the chances of removing
   some copies as the result of subsequent assignment.  */
static void
update_copy_costs (ira_allocno_t allocno, bool decr_p)
{
  int i, cost, update_cost, hard_regno, divisor;
  enum machine_mode mode;
  enum reg_class rclass, cover_class;
  ira_allocno_t another_allocno;
  ira_copy_t cp, next_cp;

  hard_regno = ALLOCNO_HARD_REGNO (allocno);
  ira_assert (hard_regno >= 0);

  cover_class = ALLOCNO_COVER_CLASS (allocno);
  if (cover_class == NO_REGS)
    return;
  i = ira_class_hard_reg_index[cover_class][hard_regno];
  ira_assert (i >= 0);
  rclass = REGNO_REG_CLASS (hard_regno);

  start_update_cost ();
  divisor = 1;
  do
    {
      mode = ALLOCNO_MODE (allocno);
      for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
        {
          if (cp->first == allocno)
            {
              next_cp = cp->next_first_allocno_copy;
              another_allocno = cp->second;
            }
          else if (cp->second == allocno)
            {
              next_cp = cp->next_second_allocno_copy;
              another_allocno = cp->first;
            }
          else
            gcc_unreachable ();

          cover_class = ALLOCNO_COVER_CLASS (another_allocno);
          if (! ira_reg_classes_intersect_p[rclass][cover_class]
              || ALLOCNO_ASSIGNED_P (another_allocno))
            continue;

          cost = (cp->second == allocno
                  ? ira_get_register_move_cost (mode, rclass, cover_class)
                  : ira_get_register_move_cost (mode, cover_class, rclass));
          if (decr_p)
            cost = -cost;

          update_cost = cp->freq * cost / divisor;
          if (update_cost == 0)
            continue;

          ira_allocate_and_set_or_copy_costs
            (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
             ALLOCNO_UPDATED_COVER_CLASS_COST (another_allocno),
             ALLOCNO_HARD_REG_COSTS (another_allocno));
          ira_allocate_and_set_or_copy_costs
            (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
             cover_class, 0,
             ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
          i = ira_class_hard_reg_index[cover_class][hard_regno];
          ira_assert (i >= 0);
          ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
          ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
            += update_cost;

          queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
        }
    }
  while (get_next_update_cost (&allocno, &divisor));
}

/* This function updates COSTS (decreases them if DECR_P) for hard
   registers of COVER_CLASS by the conflict costs of unassigned
   allocnos connected by copies with the allocnos in
   update_cost_queue.  The update increases the chances of removing
   some copies.  */
static void
update_conflict_hard_regno_costs (int *costs, enum reg_class cover_class,
                                  bool decr_p)
{
  int i, cost, class_size, freq, mult, div, divisor;
  int index, hard_regno;
  int *conflict_costs;
  bool cont_p;
  enum reg_class another_cover_class;
  ira_allocno_t allocno, another_allocno;
  ira_copy_t cp, next_cp;

  while (get_next_update_cost (&allocno, &divisor))
    for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
      {
        if (cp->first == allocno)
          {
            next_cp = cp->next_first_allocno_copy;
            another_allocno = cp->second;
          }
        else if (cp->second == allocno)
          {
            next_cp = cp->next_second_allocno_copy;
            another_allocno = cp->first;
          }
        else
          gcc_unreachable ();
        another_cover_class = ALLOCNO_COVER_CLASS (another_allocno);
        if (! ira_reg_classes_intersect_p[cover_class][another_cover_class]
            || ALLOCNO_ASSIGNED_P (another_allocno)
            || ALLOCNO_MAY_BE_SPILLED_P (ALLOCNO_FIRST_COALESCED_ALLOCNO
                                         (another_allocno)))
          continue;
        class_size = ira_class_hard_regs_num[another_cover_class];
        ira_allocate_and_copy_costs
          (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
           another_cover_class,
           ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
        conflict_costs
          = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
        if (conflict_costs == NULL)
          cont_p = true;
        else
          {
            mult = cp->freq;
            freq = ALLOCNO_FREQ (another_allocno);
            if (freq == 0)
              freq = 1;
            div = freq * divisor;
            cont_p = false;
            for (i = class_size - 1; i >= 0; i--)
              {
                hard_regno = ira_class_hard_regs[another_cover_class][i];
                ira_assert (hard_regno >= 0);
                index = ira_class_hard_reg_index[cover_class][hard_regno];
                if (index < 0)
                  continue;
                cost = conflict_costs [i] * mult / div;
                if (cost == 0)
                  continue;
                cont_p = true;
                if (decr_p)
                  cost = -cost;
                costs[index] += cost;
              }
          }
        /* Probably 5 hops will be enough.  */
        if (cont_p
            && divisor <= (COST_HOP_DIVISOR
                           * COST_HOP_DIVISOR
                           * COST_HOP_DIVISOR
                           * COST_HOP_DIVISOR))
          queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
      }
}
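
/* Our annotation: the guard above admits an allocno while the incoming
   divisor is at most COST_HOP_DIVISOR**4 = 256, so a queued neighbor
   can reach divisor 4**5 = 1024 -- the "5 hops" mentioned in the
   comment; beyond that the integer division would make nearly every
   update zero anyway.  */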

/* Sort allocnos according to the profit of usage of a hard register
   instead of memory for them.  */
static int
allocno_cost_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t p1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t p2 = *(const ira_allocno_t *) v2p;
  int c1, c2;

  c1 = ALLOCNO_UPDATED_MEMORY_COST (p1) - ALLOCNO_UPDATED_COVER_CLASS_COST (p1);
  c2 = ALLOCNO_UPDATED_MEMORY_COST (p2) - ALLOCNO_UPDATED_COVER_CLASS_COST (p2);
  if (c1 - c2)
    return c1 - c2;

  /* If regs are equally good, sort by allocno numbers, so that the
     results of qsort leave nothing to chance.  */
  return ALLOCNO_NUM (p1) - ALLOCNO_NUM (p2);
}

/* Print all allocnos coalesced with ALLOCNO.  */
static void
print_coalesced_allocno (ira_allocno_t allocno)
{
  ira_allocno_t a;

  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      ira_print_expanded_allocno (a);
      if (a == allocno)
        break;
      fprintf (ira_dump_file, "+");
    }
}

/* Choose a hard register for ALLOCNO (or for all coalesced allocnos
   represented by ALLOCNO).  If RETRY_P is TRUE, the function is
   called from `ira_reassign_conflict_allocnos' or
   `allocno_reload_assign'.  This function also implements optimistic
   coalescing: if we fail to assign a hard register to the set of
   coalesced allocnos, we put them onto the coloring stack for
   subsequent separate assignment.  */
static bool
assign_hard_reg (ira_allocno_t allocno, bool retry_p)
{
  HARD_REG_SET conflicting_regs[2];
  int i, j, hard_regno, nregs, best_hard_regno, class_size;
  int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords;
  int *a_costs;
  enum reg_class cover_class;
  enum machine_mode mode;
  ira_allocno_t a;
  static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
#ifndef HONOR_REG_ALLOC_ORDER
  enum reg_class rclass;
  int add_cost;
#endif
#ifdef STACK_REGS
  bool no_stack_reg_p;
#endif

  nwords = ALLOCNO_NUM_OBJECTS (allocno);
  ira_assert (! ALLOCNO_ASSIGNED_P (allocno));
  cover_class = ALLOCNO_COVER_CLASS (allocno);
  class_size = ira_class_hard_regs_num[cover_class];
  mode = ALLOCNO_MODE (allocno);
  for (i = 0; i < nwords; i++)
    CLEAR_HARD_REG_SET (conflicting_regs[i]);
  best_hard_regno = -1;
  memset (full_costs, 0, sizeof (int) * class_size);
  mem_cost = 0;
  if (allocno_coalesced_p)
    bitmap_clear (processed_coalesced_allocno_bitmap);
  memset (costs, 0, sizeof (int) * class_size);
  memset (full_costs, 0, sizeof (int) * class_size);
#ifdef STACK_REGS
  no_stack_reg_p = false;
#endif
  start_update_cost ();
  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      int word;
      mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);

      ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
                                   cover_class, ALLOCNO_HARD_REG_COSTS (a));
      a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
#ifdef STACK_REGS
      no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
#endif
      cost = ALLOCNO_UPDATED_COVER_CLASS_COST (a);
      for (i = 0; i < class_size; i++)
        if (a_costs != NULL)
          {
            costs[i] += a_costs[i];
            full_costs[i] += a_costs[i];
          }
        else
          {
            costs[i] += cost;
            full_costs[i] += cost;
          }
      for (word = 0; word < nwords; word++)
        {
          ira_object_t conflict_obj;
          ira_object_t obj = ALLOCNO_OBJECT (allocno, word);
          ira_object_conflict_iterator oci;

          IOR_HARD_REG_SET (conflicting_regs[word],
                            OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
          /* Take preferences of conflicting allocnos into account.  */
          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_allocno = OBJECT_ALLOCNO (conflict_obj);
              enum reg_class conflict_cover_class;
              /* Reload can give another class so we need to check all
                 allocnos.  */
              if (!retry_p && !bitmap_bit_p (consideration_allocno_bitmap,
                                             ALLOCNO_NUM (conflict_allocno)))
                continue;
              conflict_cover_class = ALLOCNO_COVER_CLASS (conflict_allocno);
              ira_assert (ira_reg_classes_intersect_p
                          [cover_class][conflict_cover_class]);
              if (ALLOCNO_ASSIGNED_P (conflict_allocno))
                {
                  hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno);
                  if (hard_regno >= 0
                      && ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
                    {
                      enum machine_mode mode = ALLOCNO_MODE (conflict_allocno);
                      int conflict_nregs = hard_regno_nregs[hard_regno][mode];
                      int n_objects = ALLOCNO_NUM_OBJECTS (conflict_allocno);
                      if (conflict_nregs == n_objects && conflict_nregs > 1)
                        {
                          int num = OBJECT_SUBWORD (conflict_obj);
                          if (WORDS_BIG_ENDIAN)
                            SET_HARD_REG_BIT (conflicting_regs[word],
                                              hard_regno + n_objects - num - 1);
                          else
                            SET_HARD_REG_BIT (conflicting_regs[word],
                                              hard_regno + num);
                        }
                      else
                        IOR_HARD_REG_SET (conflicting_regs[word],
                                          ira_reg_mode_hard_regset[hard_regno][mode]);
                      if (hard_reg_set_subset_p (reg_class_contents[cover_class],
                                                 conflicting_regs[word]))
                        goto fail;
                    }
                }
              else if (! ALLOCNO_MAY_BE_SPILLED_P (ALLOCNO_FIRST_COALESCED_ALLOCNO
                                                   (conflict_allocno)))
                {
                  int k, *conflict_costs;

                  if (allocno_coalesced_p)
                    {
                      if (!bitmap_set_bit (processed_coalesced_allocno_bitmap,
                                           ALLOCNO_NUM (conflict_allocno)))
                        continue;
                    }

                  ira_allocate_and_copy_costs
                    (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno),
                     conflict_cover_class,
                     ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_allocno));
                  conflict_costs
                    = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_allocno);
                  if (conflict_costs != NULL)
                    for (j = class_size - 1; j >= 0; j--)
                      {
                        hard_regno = ira_class_hard_regs[cover_class][j];
                        ira_assert (hard_regno >= 0);
                        k = (ira_class_hard_reg_index
                             [conflict_cover_class][hard_regno]);
                        if (k < 0)
                          continue;
                        full_costs[j] -= conflict_costs[k];
                      }
                  queue_update_cost (conflict_allocno, COST_HOP_DIVISOR);
                }
            }
        }
      if (a == allocno)
        break;
    }
  /* Take into account preferences of allocnos connected by copies to
     the conflict allocnos.  */
  update_conflict_hard_regno_costs (full_costs, cover_class, true);

  /* Take preferences of allocnos connected by copies into
     account.  */
  start_update_cost ();
  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      queue_update_cost (a, COST_HOP_DIVISOR);
      if (a == allocno)
        break;
    }
  update_conflict_hard_regno_costs (full_costs, cover_class, false);
  min_cost = min_full_cost = INT_MAX;

  /* We don't care about giving callee saved registers to allocnos not
     living through calls because call clobbered registers are
     allocated first (it is usual practice to put them first in
     REG_ALLOC_ORDER).  */
  for (i = 0; i < class_size; i++)
    {
      hard_regno = ira_class_hard_regs[cover_class][i];
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (allocno)];
#ifdef STACK_REGS
      if (no_stack_reg_p
          && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
        continue;
#endif
      if (TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
                             hard_regno))
        continue;
      for (j = 0; j < nregs; j++)
        {
          int k;
          int set_to_test_start = 0, set_to_test_end = nwords;
          if (nregs == nwords)
            {
              if (WORDS_BIG_ENDIAN)
                set_to_test_start = nwords - j - 1;
              else
                set_to_test_start = j;
              set_to_test_end = set_to_test_start + 1;
            }
          for (k = set_to_test_start; k < set_to_test_end; k++)
            if (TEST_HARD_REG_BIT (conflicting_regs[k], hard_regno + j))
              break;
          if (k != set_to_test_end)
            break;
        }
      if (j != nregs)
        continue;
      cost = costs[i];
      full_cost = full_costs[i];
#ifndef HONOR_REG_ALLOC_ORDER
      if (! allocated_hardreg_p[hard_regno]
          && ira_hard_reg_not_in_set_p (hard_regno, mode, call_used_reg_set))
        /* We need to save/restore the hard register in
           epilogue/prologue.  Therefore we increase the cost.  */
        {
          /* ??? If only part is call clobbered.  */
          rclass = REGNO_REG_CLASS (hard_regno);
          add_cost = (ira_memory_move_cost[mode][rclass][0]
                      + ira_memory_move_cost[mode][rclass][1] - 1);
          cost += add_cost;
          full_cost += add_cost;
        }
#endif
      if (min_cost > cost)
        min_cost = cost;
      if (min_full_cost > full_cost)
        {
          min_full_cost = full_cost;
          best_hard_regno = hard_regno;
          ira_assert (hard_regno >= 0);
        }
    }
  if (min_full_cost > mem_cost)
    {
      if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
                 mem_cost, min_full_cost);
      best_hard_regno = -1;
    }
 fail:
  if (flag_ira_algorithm != IRA_ALGORITHM_PRIORITY
      && best_hard_regno < 0
      && ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno) != allocno)
    {
      for (j = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
           a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
        {
          ira_assert (! ALLOCNO_IN_GRAPH_P (a));
          sorted_allocnos[j++] = a;
          if (a == allocno)
            break;
        }
      qsort (sorted_allocnos, j, sizeof (ira_allocno_t),
             allocno_cost_compare_func);
      for (i = 0; i < j; i++)
        {
          a = sorted_allocnos[i];
          ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
          ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
          VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            {
              fprintf (ira_dump_file, " Pushing");
              print_coalesced_allocno (a);
              fprintf (ira_dump_file, "\n");
            }
        }
      return false;
    }
  if (best_hard_regno >= 0)
    allocated_hardreg_p[best_hard_regno] = true;
  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      ALLOCNO_HARD_REGNO (a) = best_hard_regno;
      ALLOCNO_ASSIGNED_P (a) = true;
      if (best_hard_regno >= 0)
        update_copy_costs (a, true);
      ira_assert (ALLOCNO_COVER_CLASS (a) == cover_class);
      /* We don't need updated costs anymore:  */
      ira_free_allocno_updated_costs (a);
      if (a == allocno)
        break;
    }
  return best_hard_regno >= 0;
}

/* This page contains the allocator based on the Chaitin-Briggs algorithm.  */

/* Bucket of allocnos that can be colored currently without
   spilling.  */
static ira_allocno_t colorable_allocno_bucket;

/* Bucket of allocnos that might not be colorable currently without
   spilling.  */
static ira_allocno_t uncolorable_allocno_bucket;

/* Each element of the array contains the current number of allocnos
   of given *cover* class in uncolorable_allocno_bucket.  */
static int uncolorable_allocnos_num[N_REG_CLASSES];

/* Return the current spill priority of allocno A.  The smaller the
   number, the more preferable the allocno is for spilling.  */
static int
allocno_spill_priority (ira_allocno_t a)
{
  return (ALLOCNO_TEMP (a)
          / (ALLOCNO_LEFT_CONFLICTS_SIZE (a)
             * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]
             + 1));
}
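
/* Worked example (our annotation): an allocno with accumulated spill
   cost ALLOCNO_TEMP == 1200, 5 conflicts left and a mode needing 2
   hard registers gets priority 1200 / (5 * 2 + 1) == 109.  A cheaper
   allocno with the same conflict pressure gets a smaller value and is
   therefore preferred for spilling.  */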

/* Add ALLOCNO to bucket *BUCKET_PTR.  ALLOCNO should not be in a
   bucket before the call.  */
static void
add_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
  ira_allocno_t first_allocno;
  enum reg_class cover_class;

  if (bucket_ptr == &uncolorable_allocno_bucket
      && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
    {
      uncolorable_allocnos_num[cover_class]++;
      ira_assert (uncolorable_allocnos_num[cover_class] > 0);
    }
  first_allocno = *bucket_ptr;
  ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno) = first_allocno;
  ALLOCNO_PREV_BUCKET_ALLOCNO (allocno) = NULL;
  if (first_allocno != NULL)
    ALLOCNO_PREV_BUCKET_ALLOCNO (first_allocno) = allocno;
  *bucket_ptr = allocno;
}

/* The function returns, through FREQ and NUM, the total frequency and
   the number of available hard registers for allocnos coalesced with
   ALLOCNO.  */
static void
get_coalesced_allocnos_attributes (ira_allocno_t allocno, int *freq, int *num)
{
  ira_allocno_t a;

  *freq = 0;
  *num = 0;
  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      *freq += ALLOCNO_FREQ (a);
      *num += ALLOCNO_AVAILABLE_REGS_NUM (a);
      if (a == allocno)
        break;
    }
}

/* Compare two allocnos to define which allocno should be pushed first
   into the coloring stack.  If the return is a negative number, the
   allocno given by the first parameter will be pushed first.  In this
   case such allocno has less priority than the second one and the
   hard register will be assigned to it after assignment to the second
   one.  As the result of such assignment order, the second allocno
   has a better chance to get the best hard register.  */
static int
bucket_allocno_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  int diff, a1_freq, a2_freq, a1_num, a2_num;

  if ((diff = (int) ALLOCNO_COVER_CLASS (a2) - ALLOCNO_COVER_CLASS (a1)) != 0)
    return diff;
  get_coalesced_allocnos_attributes (a1, &a1_freq, &a1_num);
  get_coalesced_allocnos_attributes (a2, &a2_freq, &a2_num);
  if ((diff = a2_num - a1_num) != 0)
    return diff;
  else if ((diff = a1_freq - a2_freq) != 0)
    return diff;
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}

/* Sort bucket *BUCKET_PTR and return the result through
   BUCKET_PTR.  */
static void
sort_bucket (ira_allocno_t *bucket_ptr)
{
  ira_allocno_t a, head;
  int n;

  for (n = 0, a = *bucket_ptr; a != NULL; a = ALLOCNO_NEXT_BUCKET_ALLOCNO (a))
    sorted_allocnos[n++] = a;
  if (n <= 1)
    return;
  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
         bucket_allocno_compare_func);
  head = NULL;
  for (n--; n >= 0; n--)
    {
      a = sorted_allocnos[n];
      ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = head;
      ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
      if (head != NULL)
        ALLOCNO_PREV_BUCKET_ALLOCNO (head) = a;
      head = a;
    }
  *bucket_ptr = head;
}

/* Add ALLOCNO to bucket *BUCKET_PTR, maintaining the order according
   to their priority.  ALLOCNO should not be in a bucket before the
   call.  */
static void
add_allocno_to_ordered_bucket (ira_allocno_t allocno,
                               ira_allocno_t *bucket_ptr)
{
  ira_allocno_t before, after;
  enum reg_class cover_class;

  if (bucket_ptr == &uncolorable_allocno_bucket
      && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
    {
      uncolorable_allocnos_num[cover_class]++;
      ira_assert (uncolorable_allocnos_num[cover_class] > 0);
    }
  for (before = *bucket_ptr, after = NULL;
       before != NULL;
       after = before, before = ALLOCNO_NEXT_BUCKET_ALLOCNO (before))
    if (bucket_allocno_compare_func (&allocno, &before) < 0)
      break;
  ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno) = before;
  ALLOCNO_PREV_BUCKET_ALLOCNO (allocno) = after;
  if (after == NULL)
    *bucket_ptr = allocno;
  else
    ALLOCNO_NEXT_BUCKET_ALLOCNO (after) = allocno;
  if (before != NULL)
    ALLOCNO_PREV_BUCKET_ALLOCNO (before) = allocno;
}

/* Delete ALLOCNO from bucket *BUCKET_PTR.  It should be there before
   the call.  */
static void
delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
  ira_allocno_t prev_allocno, next_allocno;
  enum reg_class cover_class;

  if (bucket_ptr == &uncolorable_allocno_bucket
      && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
    {
      uncolorable_allocnos_num[cover_class]--;
      ira_assert (uncolorable_allocnos_num[cover_class] >= 0);
    }
  prev_allocno = ALLOCNO_PREV_BUCKET_ALLOCNO (allocno);
  next_allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno);
  if (prev_allocno != NULL)
    ALLOCNO_NEXT_BUCKET_ALLOCNO (prev_allocno) = next_allocno;
  else
    {
      ira_assert (*bucket_ptr == allocno);
      *bucket_ptr = next_allocno;
    }
  if (next_allocno != NULL)
    ALLOCNO_PREV_BUCKET_ALLOCNO (next_allocno) = prev_allocno;
}

/* Splay tree for each cover class.  The trees are indexed by the
   corresponding cover classes.  Splay trees contain uncolorable
   allocnos.  */
static splay_tree uncolorable_allocnos_splay_tree[N_REG_CLASSES];

/* If the following macro is TRUE, splay tree is used to choose an
   allocno of the corresponding cover class for spilling.  When the
   number of uncolorable allocnos of given cover class decreases to
   some threshold, linear array search is used to find the best
   allocno for spilling.  This threshold is actually pretty big
   because, although splay trees are asymptotically much faster, each
   splay tree operation is fairly costly, especially taking cache
   locality into account.  */
#define USE_SPLAY_P(CLASS) (uncolorable_allocnos_num[CLASS] > 4000)
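
/* Our annotation: at the 4000-allocno threshold a splay tree lookup
   touches on the order of log2 (4000), roughly 12, nodes, while the
   linear fallback in push_allocnos_to_stack scans the whole array;
   below this size the tree's pointer chasing and cache misses cost
   more than the scan it saves.  */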

/* Put ALLOCNO onto the coloring stack without removing it from its
   bucket.  Pushing allocno to the coloring stack can result in moving
   conflicting allocnos from the uncolorable bucket to the colorable
   one.  */
static void
push_allocno_to_stack (ira_allocno_t allocno)
{
  int size;
  ira_allocno_t a;
  enum reg_class cover_class;

  ALLOCNO_IN_GRAPH_P (allocno) = false;
  VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, allocno);
  cover_class = ALLOCNO_COVER_CLASS (allocno);
  if (cover_class == NO_REGS)
    return;
  size = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)];
  if (ALLOCNO_NUM_OBJECTS (allocno) > 1)
    {
      /* We will deal with the subwords individually.  */
      gcc_assert (size == ALLOCNO_NUM_OBJECTS (allocno));
      size = 1;
    }
  if (allocno_coalesced_p)
    bitmap_clear (processed_coalesced_allocno_bitmap);

  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      int i, n = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          int conflict_size;
          ira_object_t conflict_obj;
          ira_object_conflict_iterator oci;

          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_allocno = OBJECT_ALLOCNO (conflict_obj);
              int left_conflicts_size;

              conflict_allocno = ALLOCNO_FIRST_COALESCED_ALLOCNO (conflict_allocno);
              if (!bitmap_bit_p (coloring_allocno_bitmap,
                                 ALLOCNO_NUM (conflict_allocno)))
                continue;

              ira_assert (cover_class
                          == ALLOCNO_COVER_CLASS (conflict_allocno));
              if (allocno_coalesced_p)
                {
                  conflict_obj = ALLOCNO_OBJECT (conflict_allocno,
                                                 OBJECT_SUBWORD (conflict_obj));
                  if (!bitmap_set_bit (processed_coalesced_allocno_bitmap,
                                       OBJECT_CONFLICT_ID (conflict_obj)))
                    continue;
                }

              if (!ALLOCNO_IN_GRAPH_P (conflict_allocno)
                  || ALLOCNO_ASSIGNED_P (conflict_allocno))
                continue;

              left_conflicts_size = ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno);
              conflict_size
                = (ira_reg_class_nregs
                   [cover_class][ALLOCNO_MODE (conflict_allocno)]);
              ira_assert (left_conflicts_size >= size);
              if (left_conflicts_size + conflict_size
                  <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
                {
                  ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno) -= size;
                  continue;
                }
              left_conflicts_size -= size;
              if (uncolorable_allocnos_splay_tree[cover_class] != NULL
                  && !ALLOCNO_SPLAY_REMOVED_P (conflict_allocno)
                  && USE_SPLAY_P (cover_class))
                {
                  ira_assert
                    (splay_tree_lookup
                     (uncolorable_allocnos_splay_tree[cover_class],
                      (splay_tree_key) conflict_allocno) != NULL);
                  splay_tree_remove
                    (uncolorable_allocnos_splay_tree[cover_class],
                     (splay_tree_key) conflict_allocno);
                  ALLOCNO_SPLAY_REMOVED_P (conflict_allocno) = true;
                  VEC_safe_push (ira_allocno_t, heap,
                                 removed_splay_allocno_vec,
                                 conflict_allocno);
                }
              ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_allocno)
                = left_conflicts_size;
              if (left_conflicts_size + conflict_size
                  <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_allocno))
                {
                  delete_allocno_from_bucket
                    (conflict_allocno, &uncolorable_allocno_bucket);
                  add_allocno_to_ordered_bucket
                    (conflict_allocno, &colorable_allocno_bucket);
                }
            }
        }
      if (a == allocno)
        break;
    }
}

/* Put ALLOCNO onto the coloring stack and remove it from its bucket.
   The allocno is in the colorable bucket if COLORABLE_P is TRUE.  */
static void
remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
{
  enum reg_class cover_class;

  if (colorable_p)
    delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
  else
    delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, " Pushing");
      print_coalesced_allocno (allocno);
      if (colorable_p)
        fprintf (ira_dump_file, "\n");
      else
        fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
                 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
                 allocno_spill_priority (allocno), ALLOCNO_TEMP (allocno));
    }
  cover_class = ALLOCNO_COVER_CLASS (allocno);
  ira_assert ((colorable_p
               && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                   + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
                   <= ALLOCNO_AVAILABLE_REGS_NUM (allocno)))
              || (! colorable_p
                  && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                      + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
                                                         (allocno)]
                      > ALLOCNO_AVAILABLE_REGS_NUM (allocno))));
  if (! colorable_p)
    ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
  push_allocno_to_stack (allocno);
}

/* Put all allocnos from colorable bucket onto the coloring stack.  */
static void
push_only_colorable (void)
{
  sort_bucket (&colorable_allocno_bucket);
  for (;colorable_allocno_bucket != NULL;)
    remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
}

/* Put ALLOCNO chosen for potential spilling onto the coloring
   stack.  */
static void
push_allocno_to_spill (ira_allocno_t allocno)
{
  delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
  ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Pushing p%d(%d) (spill for NO_REGS)\n",
             ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno));
  push_allocno_to_stack (allocno);
}

/* Return the frequency of exit edges (if EXIT_P) or entry edges
   (otherwise) of the loop given by its LOOP_NODE.  If REGNO >= 0,
   count only the edges on which pseudo REGNO is live.  */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
  int freq, i;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;

  ira_assert (loop_node->loop != NULL
              && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
  freq = 0;
  if (! exit_p)
    {
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
        if (e->src != loop_node->loop->latch
            && (regno < 0
                || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
                    && bitmap_bit_p (DF_LR_IN (e->dest), regno))))
          freq += EDGE_FREQUENCY (e);
    }
  else
    {
      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edge, edges, i, e)
        if (regno < 0
            || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
                && bitmap_bit_p (DF_LR_IN (e->dest), regno)))
          freq += EDGE_FREQUENCY (e);
      VEC_free (edge, heap, edges);
    }

  return REG_FREQ_FROM_EDGE_FREQ (freq);
}

/* Calculate and return the cost of putting allocno A into memory.  */
static int
calculate_allocno_spill_cost (ira_allocno_t a)
{
  int regno, cost;
  enum machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t parent_allocno;
  ira_loop_tree_node_t parent_node, loop_node;

  regno = ALLOCNO_REGNO (a);
  cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_COVER_CLASS_COST (a);
  if (ALLOCNO_CAP (a) != NULL)
    return cost;
  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
  if ((parent_node = loop_node->parent) == NULL)
    return cost;
  if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
    return cost;
  mode = ALLOCNO_MODE (a);
  rclass = ALLOCNO_COVER_CLASS (a);
  if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
    cost -= (ira_memory_move_cost[mode][rclass][0]
             * ira_loop_edge_freq (loop_node, regno, true)
             + ira_memory_move_cost[mode][rclass][1]
             * ira_loop_edge_freq (loop_node, regno, false));
  else
    cost += ((ira_memory_move_cost[mode][rclass][1]
              * ira_loop_edge_freq (loop_node, regno, true)
              + ira_memory_move_cost[mode][rclass][0]
              * ira_loop_edge_freq (loop_node, regno, false))
             - (ira_get_register_move_cost (mode, rclass, rclass)
                * (ira_loop_edge_freq (loop_node, regno, false)
                   + ira_loop_edge_freq (loop_node, regno, true))));
  return cost;
}
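
/* Worked numbers (our annotation): take memory move costs of 4 and 6,
   a register move cost of 2, and loop entry and exit frequencies of
   10 each.  If the parent allocno is already in memory, the first
   branch subtracts 4*10 + 6*10 = 100 (no moves are needed on the loop
   border when the child is spilled too).  If the parent got a hard
   register, the second branch adds (6*10 + 4*10) - 2 * (10 + 10) = 60
   (memory moves on the border replace cheaper register moves).  */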

/* Compare keys in the splay tree used to choose best allocno for
   spilling.  The best allocno has the minimal key.  */
static int
allocno_spill_priority_compare (splay_tree_key k1, splay_tree_key k2)
{
  int pri1, pri2, diff;
  ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;

  pri1 = (ALLOCNO_TEMP (a1)
          / (ALLOCNO_LEFT_CONFLICTS_SIZE (a1)
             * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
             + 1));
  pri2 = (ALLOCNO_TEMP (a2)
          / (ALLOCNO_LEFT_CONFLICTS_SIZE (a2)
             * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
             + 1));
  if ((diff = pri1 - pri2) != 0)
    return diff;
  if ((diff = ALLOCNO_TEMP (a1) - ALLOCNO_TEMP (a2)) != 0)
    return diff;
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}

/* Allocate data of SIZE for the splay trees.  We allocate only splay
   tree roots or splay tree nodes.  If you change this, please rewrite
   the function.  */
static void *
splay_tree_allocate (int size, void *data ATTRIBUTE_UNUSED)
{
  if (size != sizeof (struct splay_tree_node_s))
    return ira_allocate (size);
  return pool_alloc (splay_tree_node_pool);
}

/* Free data NODE for the splay trees.  We allocate and free only splay
   tree roots or splay tree nodes.  If you change this, please rewrite
   the function.  */
static void
splay_tree_free (void *node, void *data ATTRIBUTE_UNUSED)
{
  int i;
  enum reg_class cover_class;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      if (node == uncolorable_allocnos_splay_tree[cover_class])
        {
          ira_free (node);
          return;
        }
    }
  pool_free (splay_tree_node_pool, node);
}

/* Push allocnos to the coloring stack.  The order of allocnos in the
   stack defines the order for the subsequent coloring.  */
static void
push_allocnos_to_stack (void)
{
  ira_allocno_t allocno, a, i_allocno, *allocno_vec;
  enum reg_class cover_class, rclass;
  int allocno_pri, i_allocno_pri, allocno_cost, i_allocno_cost;
  int i, j, num, cover_class_allocnos_num[N_REG_CLASSES];
  ira_allocno_t *cover_class_allocnos[N_REG_CLASSES];
  int cost;

  /* Initialize.  */
  VEC_truncate(ira_allocno_t, removed_splay_allocno_vec, 0);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      cover_class_allocnos_num[cover_class] = 0;
      cover_class_allocnos[cover_class] = NULL;
      uncolorable_allocnos_splay_tree[cover_class] = NULL;
    }
  /* Calculate uncolorable allocno spill costs.  */
  for (allocno = uncolorable_allocno_bucket;
       allocno != NULL;
       allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno))
    if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
      {
        cover_class_allocnos_num[cover_class]++;
        cost = 0;
        for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
             a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
          {
            cost += calculate_allocno_spill_cost (a);
            if (a == allocno)
              break;
          }
        /* ??? Remove cost of copies between the coalesced
           allocnos.  */
        ALLOCNO_TEMP (allocno) = cost;
      }
  /* Define place where to put uncolorable allocnos of the same cover
     class.  */
  for (num = i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      ira_assert (cover_class_allocnos_num[cover_class]
                  == uncolorable_allocnos_num[cover_class]);
      if (cover_class_allocnos_num[cover_class] != 0)
        {
          cover_class_allocnos[cover_class] = allocnos_for_spilling + num;
          num += cover_class_allocnos_num[cover_class];
          cover_class_allocnos_num[cover_class] = 0;
        }
      if (USE_SPLAY_P (cover_class))
        uncolorable_allocnos_splay_tree[cover_class]
          = splay_tree_new_with_allocator (allocno_spill_priority_compare,
                                           NULL, NULL, splay_tree_allocate,
                                           splay_tree_free, NULL);
    }
  ira_assert (num <= ira_allocnos_num);
  /* Collect uncolorable allocnos of each cover class.  */
  for (allocno = uncolorable_allocno_bucket;
       allocno != NULL;
       allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno))
    if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
      {
        cover_class_allocnos
          [cover_class][cover_class_allocnos_num[cover_class]++] = allocno;
        if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
          splay_tree_insert (uncolorable_allocnos_splay_tree[cover_class],
                             (splay_tree_key) allocno,
                             (splay_tree_value) allocno);
      }
  for (;;)
    {
      push_only_colorable ();
      allocno = uncolorable_allocno_bucket;
      if (allocno == NULL)
        break;
      cover_class = ALLOCNO_COVER_CLASS (allocno);
      if (cover_class == NO_REGS)
        {
          push_allocno_to_spill (allocno);
          continue;
        }
      /* Potential spilling.  */
      ira_assert
        (ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)] > 0);
      if (USE_SPLAY_P (cover_class))
        {
          for (;VEC_length (ira_allocno_t, removed_splay_allocno_vec) != 0;)
            {
              allocno = VEC_pop (ira_allocno_t, removed_splay_allocno_vec);
              ALLOCNO_SPLAY_REMOVED_P (allocno) = false;
              rclass = ALLOCNO_COVER_CLASS (allocno);
              if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                  + ira_reg_class_nregs [rclass][ALLOCNO_MODE (allocno)]
                  > ALLOCNO_AVAILABLE_REGS_NUM (allocno))
                splay_tree_insert
                  (uncolorable_allocnos_splay_tree[rclass],
                   (splay_tree_key) allocno, (splay_tree_value) allocno);
            }
          allocno = ((ira_allocno_t)
                     splay_tree_min
                     (uncolorable_allocnos_splay_tree[cover_class])->key);
          splay_tree_remove (uncolorable_allocnos_splay_tree[cover_class],
                             (splay_tree_key) allocno);
        }
      else
        {
          num = cover_class_allocnos_num[cover_class];
          ira_assert (num > 0);
          allocno_vec = cover_class_allocnos[cover_class];
          allocno = NULL;
          allocno_pri = allocno_cost = 0;
          /* Scan the uncolorable allocnos to find the one with the
             lowest spill cost.  */
          for (i = 0, j = num - 1; i <= j;)
            {
              i_allocno = allocno_vec[i];
              if (! ALLOCNO_IN_GRAPH_P (i_allocno)
                  && ALLOCNO_IN_GRAPH_P (allocno_vec[j]))
                {
                  i_allocno = allocno_vec[j];
                  allocno_vec[j] = allocno_vec[i];
                  allocno_vec[i] = i_allocno;
                }
              if (ALLOCNO_IN_GRAPH_P (i_allocno))
                {
                  i++;
                  ira_assert (ALLOCNO_TEMP (i_allocno) != INT_MAX);
                  i_allocno_cost = ALLOCNO_TEMP (i_allocno);
                  i_allocno_pri = allocno_spill_priority (i_allocno);
                  if (allocno == NULL
                      || (! ALLOCNO_BAD_SPILL_P (i_allocno)
                          && ALLOCNO_BAD_SPILL_P (allocno))
                      || (! (ALLOCNO_BAD_SPILL_P (i_allocno)
                             && ! ALLOCNO_BAD_SPILL_P (allocno))
                          && (allocno_pri > i_allocno_pri
                              || (allocno_pri == i_allocno_pri
                                  && (allocno_cost > i_allocno_cost
                                      || (allocno_cost == i_allocno_cost
                                          && (ALLOCNO_NUM (allocno)
                                              > ALLOCNO_NUM (i_allocno))))))))
                    {
                      allocno = i_allocno;
                      allocno_cost = i_allocno_cost;
                      allocno_pri = i_allocno_pri;
                    }
                }
              if (! ALLOCNO_IN_GRAPH_P (allocno_vec[j]))
                j--;
            }
          ira_assert (allocno != NULL && j >= 0);
          cover_class_allocnos_num[cover_class] = j + 1;
        }
      ira_assert (ALLOCNO_IN_GRAPH_P (allocno)
                  && ALLOCNO_COVER_CLASS (allocno) == cover_class
                  && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                      + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
                                                         (allocno)]
                      > ALLOCNO_AVAILABLE_REGS_NUM (allocno)));
      remove_allocno_from_bucket_and_push (allocno, false);
    }
  ira_assert (colorable_allocno_bucket == NULL
              && uncolorable_allocno_bucket == NULL);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      ira_assert (uncolorable_allocnos_num[cover_class] == 0);
      if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
        splay_tree_delete (uncolorable_allocnos_splay_tree[cover_class]);
    }
}

/* Pop the coloring stack and assign hard registers to the popped
   allocnos.  */
static void
pop_allocnos_from_stack (void)
{
  ira_allocno_t allocno;
  enum reg_class cover_class;

  for (;VEC_length (ira_allocno_t, allocno_stack_vec) != 0;)
    {
      allocno = VEC_pop (ira_allocno_t, allocno_stack_vec);
      cover_class = ALLOCNO_COVER_CLASS (allocno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file, " Popping");
          print_coalesced_allocno (allocno);
          fprintf (ira_dump_file, " -- ");
        }
      if (cover_class == NO_REGS)
        {
          ALLOCNO_HARD_REGNO (allocno) = -1;
          ALLOCNO_ASSIGNED_P (allocno) = true;
          ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
          ira_assert
            (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "assign memory\n");
        }
      else if (assign_hard_reg (allocno, false))
        {
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "assign reg %d\n",
                     ALLOCNO_HARD_REGNO (allocno));
        }
      else if (ALLOCNO_ASSIGNED_P (allocno))
        {
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "spill\n");
        }
      ALLOCNO_IN_GRAPH_P (allocno) = true;
    }
}

/* Loop over all coalesced allocnos of ALLOCNO and their subobjects, collecting
   total hard register conflicts in PSET (which the caller must initialize).  */
static void
all_conflicting_hard_regs_coalesced (ira_allocno_t allocno, HARD_REG_SET *pset)
{
  ira_allocno_t a;

  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      int i;
      int n = ALLOCNO_NUM_OBJECTS (a);
      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          IOR_HARD_REG_SET (*pset, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
        }
      if (a == allocno)
        break;
    }
}

/* Set up number of available hard registers for ALLOCNO.  */
static void
setup_allocno_available_regs_num (ira_allocno_t allocno)
{
  int i, n, hard_regs_num, hard_regno;
  enum machine_mode mode;
  enum reg_class cover_class;
  HARD_REG_SET temp_set;

  cover_class = ALLOCNO_COVER_CLASS (allocno);
  ALLOCNO_AVAILABLE_REGS_NUM (allocno) = ira_available_class_regs[cover_class];
  if (cover_class == NO_REGS)
    return;
  CLEAR_HARD_REG_SET (temp_set);
  ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
  hard_regs_num = ira_class_hard_regs_num[cover_class];
  all_conflicting_hard_regs_coalesced (allocno, &temp_set);

  mode = ALLOCNO_MODE (allocno);
  for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
    {
      hard_regno = ira_class_hard_regs[cover_class][i];
      if (TEST_HARD_REG_BIT (temp_set, hard_regno)
          || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
                                hard_regno))
        n++;
    }
  if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
    fprintf (ira_dump_file, " Reg %d of %s has %d regs less\n",
             ALLOCNO_REGNO (allocno), reg_class_names[cover_class], n);
  ALLOCNO_AVAILABLE_REGS_NUM (allocno) -= n;
}

/* Set up ALLOCNO_LEFT_CONFLICTS_SIZE for ALLOCNO.  */
static void
setup_allocno_left_conflicts_size (ira_allocno_t allocno)
{
  int i, hard_regs_num, hard_regno, conflict_allocnos_size;
  ira_allocno_t a;
  enum reg_class cover_class;
  HARD_REG_SET temp_set;

  cover_class = ALLOCNO_COVER_CLASS (allocno);
  hard_regs_num = ira_class_hard_regs_num[cover_class];
  CLEAR_HARD_REG_SET (temp_set);
  ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) == allocno);
  all_conflicting_hard_regs_coalesced (allocno, &temp_set);

  AND_HARD_REG_SET (temp_set, reg_class_contents[cover_class]);
  AND_COMPL_HARD_REG_SET (temp_set, ira_no_alloc_regs);

  conflict_allocnos_size = 0;
  if (! hard_reg_set_empty_p (temp_set))
    for (i = 0; i < (int) hard_regs_num; i++)
      {
        hard_regno = ira_class_hard_regs[cover_class][i];
        if (TEST_HARD_REG_BIT (temp_set, hard_regno))
          {
            conflict_allocnos_size++;
            CLEAR_HARD_REG_BIT (temp_set, hard_regno);
            if (hard_reg_set_empty_p (temp_set))
              break;
          }
      }
  CLEAR_HARD_REG_SET (temp_set);
  if (allocno_coalesced_p)
    bitmap_clear (processed_coalesced_allocno_bitmap);
  if (cover_class != NO_REGS)
    for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
         a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
      {
        int n = ALLOCNO_NUM_OBJECTS (a);
        for (i = 0; i < n; i++)
          {
            ira_object_t obj = ALLOCNO_OBJECT (a, i);
            ira_object_t conflict_obj;
            ira_object_conflict_iterator oci;

            FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
              {
                ira_allocno_t conflict_allocno = OBJECT_ALLOCNO (conflict_obj);

                conflict_allocno
                  = ALLOCNO_FIRST_COALESCED_ALLOCNO (conflict_allocno);
                if (!bitmap_bit_p (consideration_allocno_bitmap,
                                   ALLOCNO_NUM (conflict_allocno)))
                  continue;

                ira_assert (cover_class
                            == ALLOCNO_COVER_CLASS (conflict_allocno));
                if (allocno_coalesced_p)
                  {
                    if (!bitmap_set_bit (processed_coalesced_allocno_bitmap,
                                         ALLOCNO_NUM (conflict_allocno)))
                      continue;
                  }

                if (! ALLOCNO_ASSIGNED_P (conflict_allocno))
                  conflict_allocnos_size
                    += (ira_reg_class_nregs
                        [cover_class][ALLOCNO_MODE (conflict_allocno)]);
                else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_allocno))
                         >= 0)
                  {
                    int last = (hard_regno
                                + hard_regno_nregs
                                  [hard_regno][ALLOCNO_MODE (conflict_allocno)]);

                    while (hard_regno < last)
                      {
                        if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
                          {
                            conflict_allocnos_size++;
                            SET_HARD_REG_BIT (temp_set, hard_regno);
                          }
                        hard_regno++;
                      }
                  }
              }
          }
        if (a == allocno)
          break;
      }
  ALLOCNO_LEFT_CONFLICTS_SIZE (allocno) = conflict_allocnos_size;
}

/* Put ALLOCNO in a bucket corresponding to its number and size of its
   conflicting allocnos and hard registers.  */
static void
put_allocno_into_bucket (ira_allocno_t allocno)
{
  enum reg_class cover_class;

  cover_class = ALLOCNO_COVER_CLASS (allocno);
  if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
    return;
  ALLOCNO_IN_GRAPH_P (allocno) = true;
  setup_allocno_left_conflicts_size (allocno);
  setup_allocno_available_regs_num (allocno);
  if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
      + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
      <= ALLOCNO_AVAILABLE_REGS_NUM (allocno))
    add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
  else
    add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
}
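
/* Worked instance of the colorability test above (our annotation):
   with 5 registers worth of remaining conflicts, a 1-register mode
   and 6 available registers, 5 + 1 <= 6 holds and the allocno is
   trivially colorable whatever its neighbors get; with only 5
   available registers it would go to the uncolorable bucket as a
   potential (not yet certain) spill.  */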

/* The function is used to sort allocnos according to their execution
   frequencies.  */
static int
copy_freq_compare_func (const void *v1p, const void *v2p)
{
  ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
  int pri1, pri2;

  pri1 = cp1->freq;
  pri2 = cp2->freq;
  if (pri2 - pri1)
    return pri2 - pri1;

  /* If frequencies are equal, sort by copies, so that the results of
     qsort leave nothing to chance.  */
  return cp1->num - cp2->num;
}

/* Merge two sets of coalesced allocnos given correspondingly by
   allocnos A1 and A2 (more accurately merging A2 set into A1
   set).  */
static void
merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
{
  ira_allocno_t a, first, last, next;

  first = ALLOCNO_FIRST_COALESCED_ALLOCNO (a1);
  if (first == ALLOCNO_FIRST_COALESCED_ALLOCNO (a2))
    return;
  for (last = a2, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = first;
      if (a == a2)
        break;
      last = a;
    }
  next = ALLOCNO_NEXT_COALESCED_ALLOCNO (first);
  ALLOCNO_NEXT_COALESCED_ALLOCNO (first) = a2;
  ALLOCNO_NEXT_COALESCED_ALLOCNO (last) = next;
}
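
/* Small trace (our annotation): merging ring {A2, Y} into ring
   {A1, X}, i.e. A1->X->A1 and A2->Y->A2.  The loop repoints the
   first-allocno field of A2 and Y to A1's first; then FIRST's next
   pointer is set to A2 and LAST's (= Y's) to FIRST's old successor X,
   giving the single ring A1->A2->Y->X->A1.  */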

/* Given two sets of coalesced sets of allocnos, A1 and A2, this
   function determines if any conflicts exist between the two sets.
   If RELOAD_P is TRUE, we use live ranges to find conflicts because
   conflicts are represented only for allocnos of the same cover class
   and during the reload pass we coalesce allocnos for sharing stack
   memory slots.  */
static bool
coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2,
                              bool reload_p)
{
  ira_allocno_t a, conflict_allocno;

  /* When testing for conflicts, it is sufficient to examine only the
     subobjects of order 0, due to the canonicalization of conflicts
     we do in record_object_conflict.  */

  bitmap_clear (processed_coalesced_allocno_bitmap);
  if (allocno_coalesced_p)
    {
      for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
           a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
        {
          bitmap_set_bit (processed_coalesced_allocno_bitmap,
                          OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (a, 0)));
          if (a == a1)
            break;
        }
    }
  for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
       a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
    {
      if (reload_p)
        {
          for (conflict_allocno = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
               conflict_allocno
               = ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
            {
              if (allocnos_have_intersected_live_ranges_p (a,
                                                           conflict_allocno))
                return true;
              if (conflict_allocno == a1)
                break;
            }
        }
      else
        {
          ira_object_t a_obj = ALLOCNO_OBJECT (a, 0);
          ira_object_t conflict_obj;
          ira_object_conflict_iterator oci;
          FOR_EACH_OBJECT_CONFLICT (a_obj, conflict_obj, oci)
            if (conflict_obj == ALLOCNO_OBJECT (a1, 0)
                || (allocno_coalesced_p
                    && bitmap_bit_p (processed_coalesced_allocno_bitmap,
                                     OBJECT_CONFLICT_ID (conflict_obj))))
              return true;
        }

      if (a == a2)
        break;
    }
  return false;
}
1717 /* The major function for aggressive allocno coalescing. For the
1718 reload pass (RELOAD_P) we coalesce only spilled allocnos. If some
1719 allocnos have been coalesced, we set up flag
1720 allocno_coalesced_p. */
1721 static void
1722 coalesce_allocnos (bool reload_p)
1724 ira_allocno_t a;
1725 ira_copy_t cp, next_cp, *sorted_copies;
1726 enum reg_class cover_class;
1727 enum machine_mode mode;
1728 unsigned int j;
1729 int i, n, cp_num, regno;
1730 bitmap_iterator bi;
1732 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
1733 * sizeof (ira_copy_t));
1734 cp_num = 0;
1735 /* Collect copies. */
1736 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
1738 a = ira_allocnos[j];
1739 regno = ALLOCNO_REGNO (a);
1740 if ((! reload_p && ALLOCNO_ASSIGNED_P (a))
1741 || (reload_p
1742 && (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
1743 || (regno < ira_reg_equiv_len
1744 && (ira_reg_equiv_const[regno] != NULL_RTX
1745 || ira_reg_equiv_invariant_p[regno])))))
1746 continue;
1747 cover_class = ALLOCNO_COVER_CLASS (a);
1748 mode = ALLOCNO_MODE (a);
1749 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1751 if (cp->first == a)
1753 next_cp = cp->next_first_allocno_copy;
1754 regno = ALLOCNO_REGNO (cp->second);
1755 /* For priority coloring we coalesce allocnos only with
1756 the same cover class not with intersected cover
1757 classes as it were possible. It is done for
1758 simplicity. */
1759 if ((reload_p
1760 || (ALLOCNO_COVER_CLASS (cp->second) == cover_class
1761 && ALLOCNO_MODE (cp->second) == mode))
1762 && (cp->insn != NULL || cp->constraint_p)
1763 && ((! reload_p && ! ALLOCNO_ASSIGNED_P (cp->second))
1764 || (reload_p
1765 && ALLOCNO_ASSIGNED_P (cp->second)
1766 && ALLOCNO_HARD_REGNO (cp->second) < 0
1767 && (regno >= ira_reg_equiv_len
1768 || (! ira_reg_equiv_invariant_p[regno]
1769 && ira_reg_equiv_const[regno] == NULL_RTX)))))
1770 sorted_copies[cp_num++] = cp;
1772 else if (cp->second == a)
1773 next_cp = cp->next_second_allocno_copy;
1774 else
1775 gcc_unreachable ();
1778 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
1779 /* Coalesce copies, most frequently executed first. */
1780 for (; cp_num != 0;)
1782 for (i = 0; i < cp_num; i++)
1784 cp = sorted_copies[i];
1785 if (! coalesced_allocno_conflict_p (cp->first, cp->second, reload_p))
1787 allocno_coalesced_p = true;
1788 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1789 fprintf
1790 (ira_dump_file,
1791 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
1792 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1793 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
1794 cp->freq);
1795 merge_allocnos (cp->first, cp->second);
1796 i++;
1797 break;
1800 /* Collect the rest of copies. */
1801 for (n = 0; i < cp_num; i++)
1803 cp = sorted_copies[i];
1804 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->first)
1805 != ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->second))
1806 sorted_copies[n++] = cp;
1808 cp_num = n;
1810 ira_free (sorted_copies);
1813 /* Map: allocno number -> allocno priority. */
1814 static int *allocno_priorities;
1816 /* Set up priorities for N allocnos in array
1817 CONSIDERATION_ALLOCNOS. */
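/* For illustration, with hypothetical values: an allocno with
   ALLOCNO_NREFS == 5, ALLOCNO_MEMORY_COST == 2000 and
   ALLOCNO_COVER_CLASS_COST == 500 which needs two hard registers
   gets, in the first pass below,

     priority = (floor_log2 (5) + 1) * (2000 - 500) * 2 = 9000.

   The second pass then rescales all priorities into the full int
   range and divides each by the length of the allocno's
   high-pressure live region, so short, frequently used allocnos
   with expensive spills come out on top.  */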
1818 static void
1819 setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
1821 int i, length, nrefs, priority, max_priority, mult;
1822 ira_allocno_t a;
1824 max_priority = 0;
1825 for (i = 0; i < n; i++)
1827 a = consideration_allocnos[i];
1828 nrefs = ALLOCNO_NREFS (a);
1829 ira_assert (nrefs >= 0);
1830 mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
1831 ira_assert (mult >= 0);
1832 allocno_priorities[ALLOCNO_NUM (a)]
1833 = priority
1834 = (mult
1835 * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a))
1836 * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
1837 if (priority < 0)
1838 priority = -priority;
1839 if (max_priority < priority)
1840 max_priority = priority;
1842 mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
1843 for (i = 0; i < n; i++)
1845 a = consideration_allocnos[i];
1846 length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
1847 if (ALLOCNO_NUM_OBJECTS (a) > 1)
1848 length /= ALLOCNO_NUM_OBJECTS (a);
1849 if (length <= 0)
1850 length = 1;
1851 allocno_priorities[ALLOCNO_NUM (a)]
1852 = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
1856 /* Sort allocnos according to their priorities, which are calculated
1857 analogously to those in file `global.c'. */
1858 static int
1859 allocno_priority_compare_func (const void *v1p, const void *v2p)
1861 ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
1862 ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
1863 int pri1, pri2;
1865 pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
1866 pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
1867 if (pri2 != pri1)
1868 return SORTGT (pri2, pri1);
1870 /* If regs are equally good, sort by allocnos, so that the results of
1871 qsort leave nothing to chance. */
1872 return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
1875 /* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
1876 taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP. */
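/* Two strategies are used below: with IRA_ALGORITHM_PRIORITY the
   remaining allocnos are simply sorted by priority and assigned
   greedily; otherwise the classic Chaitin-Briggs scheme is used --
   allocnos are put into colorable/uncolorable buckets, pushed onto a
   stack, and popped off again to receive hard registers.  */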
1877 static void
1878 color_allocnos (void)
1880 unsigned int i, n;
1881 bitmap_iterator bi;
1882 ira_allocno_t a;
1884 allocno_coalesced_p = false;
1885 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
1886 if (flag_ira_coalesce)
1887 coalesce_allocnos (false);
1888 if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
1890 n = 0;
1891 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1893 a = ira_allocnos[i];
1894 if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
1896 ALLOCNO_HARD_REGNO (a) = -1;
1897 ALLOCNO_ASSIGNED_P (a) = true;
1898 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
1899 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
1900 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1902 fprintf (ira_dump_file, " Spill");
1903 print_coalesced_allocno (a);
1904 fprintf (ira_dump_file, "\n");
1906 continue;
1908 sorted_allocnos[n++] = a;
1910 if (n != 0)
1912 setup_allocno_priorities (sorted_allocnos, n);
1913 qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
1914 allocno_priority_compare_func);
1915 for (i = 0; i < n; i++)
1917 a = sorted_allocnos[i];
1918 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1920 fprintf (ira_dump_file, " ");
1921 print_coalesced_allocno (a);
1922 fprintf (ira_dump_file, " -- ");
1924 if (assign_hard_reg (a, false))
1926 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1927 fprintf (ira_dump_file, "assign hard reg %d\n",
1928 ALLOCNO_HARD_REGNO (a));
1930 else
1932 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1933 fprintf (ira_dump_file, "assign memory\n");
1938 else
1940 /* Put the allocnos into the corresponding buckets. */
1941 colorable_allocno_bucket = NULL;
1942 uncolorable_allocno_bucket = NULL;
1943 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1945 a = ira_allocnos[i];
1946 if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
1948 ALLOCNO_HARD_REGNO (a) = -1;
1949 ALLOCNO_ASSIGNED_P (a) = true;
1950 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
1951 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
1952 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1954 fprintf (ira_dump_file, " Spill");
1955 print_coalesced_allocno (a);
1956 fprintf (ira_dump_file, "\n");
1958 continue;
1960 put_allocno_into_bucket (a);
1962 push_allocnos_to_stack ();
1963 pop_allocnos_from_stack ();
1965 if (flag_ira_coalesce)
1966 /* We don't need coalesced allocnos for ira_reassign_pseudos. */
1967 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
1969 a = ira_allocnos[i];
1970 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
1971 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
1973 ira_free_bitmap (processed_coalesced_allocno_bitmap);
1974 allocno_coalesced_p = false;
1979 /* Output information about the loop given by its LOOP_TREE_NODE. */
1980 static void
1981 print_loop_title (ira_loop_tree_node_t loop_tree_node)
1983 unsigned int j;
1984 bitmap_iterator bi;
1985 ira_loop_tree_node_t subloop_node, dest_loop_node;
1986 edge e;
1987 edge_iterator ei;
1989 ira_assert (loop_tree_node->loop != NULL);
1990 fprintf (ira_dump_file,
1991 "\n Loop %d (parent %d, header bb%d, depth %d)\n bbs:",
1992 loop_tree_node->loop->num,
1993 (loop_tree_node->parent == NULL
1994 ? -1 : loop_tree_node->parent->loop->num),
1995 loop_tree_node->loop->header->index,
1996 loop_depth (loop_tree_node->loop));
1997 for (subloop_node = loop_tree_node->children;
1998 subloop_node != NULL;
1999 subloop_node = subloop_node->next)
2000 if (subloop_node->bb != NULL)
2002 fprintf (ira_dump_file, " %d", subloop_node->bb->index);
2003 FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
2004 if (e->dest != EXIT_BLOCK_PTR
2005 && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
2006 != loop_tree_node))
2007 fprintf (ira_dump_file, "(->%d:l%d)",
2008 e->dest->index, dest_loop_node->loop->num);
2010 fprintf (ira_dump_file, "\n all:");
2011 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
2012 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
2013 fprintf (ira_dump_file, "\n modified regnos:");
2014 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
2015 fprintf (ira_dump_file, " %d", j);
2016 fprintf (ira_dump_file, "\n border:");
2017 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
2018 fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
2019 fprintf (ira_dump_file, "\n Pressure:");
2020 for (j = 0; (int) j < ira_reg_class_cover_size; j++)
2022 enum reg_class cover_class;
2024 cover_class = ira_reg_class_cover[j];
2025 if (loop_tree_node->reg_pressure[cover_class] == 0)
2026 continue;
2027 fprintf (ira_dump_file, " %s=%d", reg_class_names[cover_class],
2028 loop_tree_node->reg_pressure[cover_class]);
2030 fprintf (ira_dump_file, "\n");
2033 /* Color the allocnos inside the loop (in the extreme case it can be
2034 the whole function) given the corresponding LOOP_TREE_NODE. The
2035 function is called for each loop during a top-down traversal of the
2036 loop tree. */
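/* Roughly, the pass below colors the loop's own allocnos first and
   then, for each subloop allocno of the same pseudo, either copies
   the assignment down (when the register pressure in the region is
   low enough, or the pseudo has an equivalent constant or invariant)
   or biases the subloop allocno's updated costs by the register- and
   memory-move costs weighted by the loop border frequencies, leaving
   the final choice to the subloop itself.  */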
2037 static void
2038 color_pass (ira_loop_tree_node_t loop_tree_node)
2040 int regno, hard_regno, index = -1;
2041 int cost, exit_freq, enter_freq;
2042 unsigned int j;
2043 bitmap_iterator bi;
2044 enum machine_mode mode;
2045 enum reg_class rclass, cover_class;
2046 ira_allocno_t a, subloop_allocno;
2047 ira_loop_tree_node_t subloop_node;
2049 ira_assert (loop_tree_node->bb == NULL);
2050 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2051 print_loop_title (loop_tree_node);
2053 bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
2054 bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
2055 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2057 a = ira_allocnos[j];
2058 if (ALLOCNO_ASSIGNED_P (a))
2059 bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
2061 /* Color all mentioned allocnos including transparent ones. */
2062 color_allocnos ();
2063 /* Process caps. They are processed just once. */
2064 if (flag_ira_region == IRA_REGION_MIXED
2065 || flag_ira_region == IRA_REGION_ALL)
2066 EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
2068 a = ira_allocnos[j];
2069 if (ALLOCNO_CAP_MEMBER (a) == NULL)
2070 continue;
2071 /* Remove from processing in the next loop. */
2072 bitmap_clear_bit (consideration_allocno_bitmap, j);
2073 rclass = ALLOCNO_COVER_CLASS (a);
2074 if (flag_ira_region == IRA_REGION_MIXED
2075 && (loop_tree_node->reg_pressure[rclass]
2076 <= ira_available_class_regs[rclass]))
2078 mode = ALLOCNO_MODE (a);
2079 hard_regno = ALLOCNO_HARD_REGNO (a);
2080 if (hard_regno >= 0)
2082 index = ira_class_hard_reg_index[rclass][hard_regno];
2083 ira_assert (index >= 0);
2085 regno = ALLOCNO_REGNO (a);
2086 subloop_allocno = ALLOCNO_CAP_MEMBER (a);
2087 subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
2088 ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
2089 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2090 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2091 if (hard_regno >= 0)
2092 update_copy_costs (subloop_allocno, true);
2093 /* We don't need updated costs anymore: */
2094 ira_free_allocno_updated_costs (subloop_allocno);
2097 /* Update costs of the corresponding allocnos (not caps) in the
2098 subloops. */
2099 for (subloop_node = loop_tree_node->subloops;
2100 subloop_node != NULL;
2101 subloop_node = subloop_node->subloop_next)
2103 ira_assert (subloop_node->bb == NULL);
2104 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
2106 a = ira_allocnos[j];
2107 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2108 mode = ALLOCNO_MODE (a);
2109 rclass = ALLOCNO_COVER_CLASS (a);
2110 hard_regno = ALLOCNO_HARD_REGNO (a);
2111 /* Use hard register class here. ??? */
2112 if (hard_regno >= 0)
2114 index = ira_class_hard_reg_index[rclass][hard_regno];
2115 ira_assert (index >= 0);
2117 regno = ALLOCNO_REGNO (a);
2118 /* ??? conflict costs */
2119 subloop_allocno = subloop_node->regno_allocno_map[regno];
2120 if (subloop_allocno == NULL
2121 || ALLOCNO_CAP (subloop_allocno) != NULL)
2122 continue;
2123 ira_assert (ALLOCNO_COVER_CLASS (subloop_allocno) == rclass);
2124 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
2125 ALLOCNO_NUM (subloop_allocno)));
2126 if ((flag_ira_region == IRA_REGION_MIXED)
2127 && (loop_tree_node->reg_pressure[rclass]
2128 <= ira_available_class_regs[rclass]))
2130 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
2132 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2133 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2134 if (hard_regno >= 0)
2135 update_copy_costs (subloop_allocno, true);
2136 /* We don't need updated costs anymore: */
2137 ira_free_allocno_updated_costs (subloop_allocno);
2139 continue;
2141 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
2142 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
2143 ira_assert (regno < ira_reg_equiv_len);
2144 if (ira_reg_equiv_invariant_p[regno]
2145 || ira_reg_equiv_const[regno] != NULL_RTX)
2147 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
2149 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
2150 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
2151 if (hard_regno >= 0)
2152 update_copy_costs (subloop_allocno, true);
2153 /* We don't need updated costs anymore: */
2154 ira_free_allocno_updated_costs (subloop_allocno);
2157 else if (hard_regno < 0)
2159 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
2160 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
2161 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
2163 else
2165 cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
2166 cost = (ira_get_register_move_cost (mode, rclass, rclass)
2167 * (exit_freq + enter_freq));
2168 ira_allocate_and_set_or_copy_costs
2169 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), cover_class,
2170 ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno),
2171 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
2172 ira_allocate_and_set_or_copy_costs
2173 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
2174 cover_class, 0,
2175 ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
2176 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
2177 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
2178 -= cost;
2179 if (ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
2180 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
2181 ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
2182 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
2183 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
2184 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
2185 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
2191 /* Initialize the common data for coloring and call the functions
2192 that do Chaitin-Briggs and regional coloring. */
2193 static void
2194 do_coloring (void)
2196 coloring_allocno_bitmap = ira_allocate_bitmap ();
2197 allocnos_for_spilling
2198 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
2199 * ira_allocnos_num);
2200 splay_tree_node_pool = create_alloc_pool ("splay tree nodes",
2201 sizeof (struct splay_tree_node_s),
2202 100);
2203 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2204 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
2206 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
2208 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2209 ira_print_disposition (ira_dump_file);
2211 free_alloc_pool (splay_tree_node_pool);
2212 ira_free_bitmap (coloring_allocno_bitmap);
2213 ira_free (allocnos_for_spilling);
2218 /* Move the spill/restore code, which is to be generated in
2219 ira-emit.c, to less frequent points (if it is profitable), by
2220 reassigning some allocnos whose loops have subloops to memory,
2221 which results in a longer live range during which the
2222 corresponding pseudo-registers will be in memory. */
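/* The decision below is cost-driven: COST accumulates the net
   benefit of keeping the allocno in its hard register for this loop
   alone -- the register-vs-memory saving attributable to the loop
   body (subloop contributions are subtracted back out) adjusted by
   the spill/restore and register-shuffle moves implied on the
   subloop and parent borders.  A negative COST means memory is
   cheaper here, so the allocno is spilled and the moves migrate to
   the less frequent outer border.  */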
2223 static void
2224 move_spill_restore (void)
2226 int cost, regno, hard_regno, hard_regno2, index;
2227 bool changed_p;
2228 int enter_freq, exit_freq;
2229 enum machine_mode mode;
2230 enum reg_class rclass;
2231 ira_allocno_t a, parent_allocno, subloop_allocno;
2232 ira_loop_tree_node_t parent, loop_node, subloop_node;
2233 ira_allocno_iterator ai;
2235 for (;;)
2237 changed_p = false;
2238 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2239 fprintf (ira_dump_file, "New iteration of spill/restore move\n");
2240 FOR_EACH_ALLOCNO (a, ai)
2242 regno = ALLOCNO_REGNO (a);
2243 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
2244 if (ALLOCNO_CAP_MEMBER (a) != NULL
2245 || ALLOCNO_CAP (a) != NULL
2246 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
2247 || loop_node->children == NULL
2248 /* Don't do the optimization, because it can create
2249 copies, and the reload pass can spill the allocno set
2250 by the copy even though the allocno itself will not
2251 get a memory slot. */
2252 || ira_reg_equiv_invariant_p[regno]
2253 || ira_reg_equiv_const[regno] != NULL_RTX
2254 || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
2255 continue;
2256 mode = ALLOCNO_MODE (a);
2257 rclass = ALLOCNO_COVER_CLASS (a);
2258 index = ira_class_hard_reg_index[rclass][hard_regno];
2259 ira_assert (index >= 0);
2260 cost = (ALLOCNO_MEMORY_COST (a)
2261 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
2262 ? ALLOCNO_COVER_CLASS_COST (a)
2263 : ALLOCNO_HARD_REG_COSTS (a)[index]));
2264 for (subloop_node = loop_node->subloops;
2265 subloop_node != NULL;
2266 subloop_node = subloop_node->subloop_next)
2268 ira_assert (subloop_node->bb == NULL);
2269 subloop_allocno = subloop_node->regno_allocno_map[regno];
2270 if (subloop_allocno == NULL)
2271 continue;
2272 ira_assert (rclass == ALLOCNO_COVER_CLASS (subloop_allocno));
2273 /* The accumulated cost includes the subloops. To get
2274 the real cost of the allocno's usage in this loop we
2275 should subtract the costs of the subloop allocnos. */
2276 cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
2277 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
2278 ? ALLOCNO_COVER_CLASS_COST (subloop_allocno)
2279 : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
2280 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
2281 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
2282 if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
2283 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
2284 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
2285 else
2287 cost
2288 += (ira_memory_move_cost[mode][rclass][0] * exit_freq
2289 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
2290 if (hard_regno2 != hard_regno)
2291 cost -= (ira_get_register_move_cost (mode, rclass, rclass)
2292 * (exit_freq + enter_freq));
2295 if ((parent = loop_node->parent) != NULL
2296 && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
2298 ira_assert (rclass == ALLOCNO_COVER_CLASS (parent_allocno));
2299 exit_freq = ira_loop_edge_freq (loop_node, regno, true);
2300 enter_freq = ira_loop_edge_freq (loop_node, regno, false);
2301 if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
2302 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
2303 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
2304 else
2306 cost
2307 += (ira_memory_move_cost[mode][rclass][1] * exit_freq
2308 + ira_memory_move_cost[mode][rclass][0] * enter_freq);
2309 if (hard_regno2 != hard_regno)
2310 cost -= (ira_get_register_move_cost (mode, rclass, rclass)
2311 * (exit_freq + enter_freq));
2314 if (cost < 0)
2316 ALLOCNO_HARD_REGNO (a) = -1;
2317 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2319 fprintf
2320 (ira_dump_file,
2321 " Moving spill/restore for a%dr%d up from loop %d",
2322 ALLOCNO_NUM (a), regno, loop_node->loop->num);
2323 fprintf (ira_dump_file, " - profit %d\n", -cost);
2325 changed_p = true;
2328 if (! changed_p)
2329 break;
2335 /* Update current hard reg costs and current conflict hard reg costs
2336 for allocno A. It is done by processing its copies containing
2337 other allocnos already assigned. */
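/* For example, if A participates in a copy of frequency F with an
   allocno that has already been assigned hard register R, the
   updated cost of R for A is decreased below by F times the
   register-move cost, making R more attractive and the copy more
   likely to be eliminated.  */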
2338 static void
2339 update_curr_costs (ira_allocno_t a)
2341 int i, hard_regno, cost;
2342 enum machine_mode mode;
2343 enum reg_class cover_class, rclass;
2344 ira_allocno_t another_a;
2345 ira_copy_t cp, next_cp;
2347 ira_free_allocno_updated_costs (a);
2348 ira_assert (! ALLOCNO_ASSIGNED_P (a));
2349 cover_class = ALLOCNO_COVER_CLASS (a);
2350 if (cover_class == NO_REGS)
2351 return;
2352 mode = ALLOCNO_MODE (a);
2353 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2355 if (cp->first == a)
2357 next_cp = cp->next_first_allocno_copy;
2358 another_a = cp->second;
2360 else if (cp->second == a)
2362 next_cp = cp->next_second_allocno_copy;
2363 another_a = cp->first;
2365 else
2366 gcc_unreachable ();
2367 if (! ira_reg_classes_intersect_p[cover_class][ALLOCNO_COVER_CLASS
2368 (another_a)]
2369 || ! ALLOCNO_ASSIGNED_P (another_a)
2370 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
2371 continue;
2372 rclass = REGNO_REG_CLASS (hard_regno);
2373 i = ira_class_hard_reg_index[cover_class][hard_regno];
2374 if (i < 0)
2375 continue;
2376 cost = (cp->first == a
2377 ? ira_get_register_move_cost (mode, rclass, cover_class)
2378 : ira_get_register_move_cost (mode, cover_class, rclass));
2379 ira_allocate_and_set_or_copy_costs
2380 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
2381 cover_class, ALLOCNO_COVER_CLASS_COST (a),
2382 ALLOCNO_HARD_REG_COSTS (a));
2383 ira_allocate_and_set_or_copy_costs
2384 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
2385 cover_class, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
2386 ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
2387 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
2391 /* Try to assign hard registers to the unassigned allocnos and
2392 allocnos conflicting with them or conflicting with allocnos whose
2393 regno >= START_REGNO. The function is called after ira_flattening,
2394 so more allocnos (including ones created in ira-emit.c) will have a
2395 chance to get a hard register. We use simple assignment algorithm
2396 based on priorities. */
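/* Outline of the function below: collect every unassigned allocno,
   plus the allocnos conflicting with allocnos whose regno >=
   START_REGNO (their current assignment, if any, is dropped so they
   can be retried), order everything with the same priority function
   used by the priority algorithm, refresh the updated costs, and run
   assign_hard_reg on each allocno in turn.  */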
2397 void
2398 ira_reassign_conflict_allocnos (int start_regno)
2400 int i, allocnos_to_color_num;
2401 ira_allocno_t a;
2402 enum reg_class cover_class;
2403 bitmap allocnos_to_color;
2404 ira_allocno_iterator ai;
2406 allocnos_to_color = ira_allocate_bitmap ();
2407 allocnos_to_color_num = 0;
2408 FOR_EACH_ALLOCNO (a, ai)
2410 int n = ALLOCNO_NUM_OBJECTS (a);
2412 if (! ALLOCNO_ASSIGNED_P (a)
2413 && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
2415 if (ALLOCNO_COVER_CLASS (a) != NO_REGS)
2416 sorted_allocnos[allocnos_to_color_num++] = a;
2417 else
2419 ALLOCNO_ASSIGNED_P (a) = true;
2420 ALLOCNO_HARD_REGNO (a) = -1;
2421 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2422 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2424 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
2426 if (ALLOCNO_REGNO (a) < start_regno
2427 || (cover_class = ALLOCNO_COVER_CLASS (a)) == NO_REGS)
2428 continue;
2429 for (i = 0; i < n; i++)
2431 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2432 ira_object_t conflict_obj;
2433 ira_object_conflict_iterator oci;
2434 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2436 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2437 ira_assert (ira_reg_classes_intersect_p
2438 [cover_class][ALLOCNO_COVER_CLASS (conflict_a)]);
2439 if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
2440 continue;
2441 sorted_allocnos[allocnos_to_color_num++] = conflict_a;
2445 ira_free_bitmap (allocnos_to_color);
2446 if (allocnos_to_color_num > 1)
2448 setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
2449 qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
2450 allocno_priority_compare_func);
2452 for (i = 0; i < allocnos_to_color_num; i++)
2454 a = sorted_allocnos[i];
2455 ALLOCNO_ASSIGNED_P (a) = false;
2456 update_curr_costs (a);
2458 for (i = 0; i < allocnos_to_color_num; i++)
2460 a = sorted_allocnos[i];
2461 if (assign_hard_reg (a, true))
2463 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2464 fprintf
2465 (ira_dump_file,
2466 " Secondary allocation: assign hard reg %d to reg %d\n",
2467 ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
2474 /* This page contains code to coalesce memory stack slots used by
2475 spilled allocnos. This results in a smaller stack frame, better
2476 data locality, and smaller code on some architectures like
2477 x86/x86_64, where insn size depends on the address displacement
2478 value. On the other hand, it can worsen insn scheduling after the
2479 RA, but in practice that matters less than a smaller stack frame. */
2481 /* Usage cost and order number of the coalesced allocno set to which
2482 the given pseudo-register belongs. */
2483 static int *regno_coalesced_allocno_cost;
2484 static int *regno_coalesced_allocno_num;
2486 /* Sort pseudos according to the frequencies of the coalesced allocno
2487 sets they belong to (putting the most frequently used ones first),
2488 and then according to the coalesced allocno set order numbers. */
2489 static int
2490 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
2492 const int regno1 = *(const int *) v1p;
2493 const int regno2 = *(const int *) v2p;
2494 int diff;
2496 if ((diff = (regno_coalesced_allocno_cost[regno2]
2497 - regno_coalesced_allocno_cost[regno1])) != 0)
2498 return diff;
2499 if ((diff = (regno_coalesced_allocno_num[regno1]
2500 - regno_coalesced_allocno_num[regno2])) != 0)
2501 return diff;
2502 return regno1 - regno2;
2505 /* Widest width in which each pseudo reg is referred to (via subreg).
2506 It is used for sorting pseudo registers. */
2507 static unsigned int *regno_max_ref_width;
2509 /* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1. */
2510 #ifdef STACK_GROWS_DOWNWARD
2511 # undef STACK_GROWS_DOWNWARD
2512 # define STACK_GROWS_DOWNWARD 1
2513 #else
2514 # define STACK_GROWS_DOWNWARD 0
2515 #endif
2517 /* Sort pseudos according to their slot numbers (putting the ones
2518 with smaller numbers first, or last when the frame pointer is not
2519 needed). */
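/* The direction check below matches the comment above: slots with
   smaller numbers normally come first, but when the frame pointer is
   not needed and the frame grows in the same direction as the stack,
   slot addressing is effectively reversed, so the comparison result
   is negated.  */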
2520 static int
2521 coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
2523 const int regno1 = *(const int *) v1p;
2524 const int regno2 = *(const int *) v2p;
2525 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
2526 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
2527 int diff, slot_num1, slot_num2;
2528 int total_size1, total_size2;
2530 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
2532 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
2533 return regno1 - regno2;
2534 return 1;
2536 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
2537 return -1;
2538 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
2539 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
2540 if ((diff = slot_num1 - slot_num2) != 0)
2541 return (frame_pointer_needed
2542 || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
2543 total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1), regno_max_ref_width[regno1]);
2544 total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2), regno_max_ref_width[regno2]);
2545 if ((diff = total_size2 - total_size1) != 0)
2546 return diff;
2547 return regno1 - regno2;
2550 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
2551 for coalesced allocno sets containing allocnos with their regnos
2552 given in array PSEUDO_REGNOS of length N. */
2553 static void
2554 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
2556 int i, num, regno, cost;
2557 ira_allocno_t allocno, a;
2559 for (num = i = 0; i < n; i++)
2561 regno = pseudo_regnos[i];
2562 allocno = ira_regno_allocno_map[regno];
2563 if (allocno == NULL)
2565 regno_coalesced_allocno_cost[regno] = 0;
2566 regno_coalesced_allocno_num[regno] = ++num;
2567 continue;
2569 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
2570 continue;
2571 num++;
2572 for (cost = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2573 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2575 cost += ALLOCNO_FREQ (a);
2576 if (a == allocno)
2577 break;
2579 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2580 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2582 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
2583 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
2584 if (a == allocno)
2585 break;
2590 /* Collect spilled allocnos representing coalesced allocno sets (the
2591 first coalesced allocno). The collected allocnos are returned
2592 through array SPILLED_COALESCED_ALLOCNOS. The function returns the
2593 number of the collected allocnos. The allocnos are given by their
2594 regnos in array PSEUDO_REGNOS of length N. */
2595 static int
2596 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
2597 ira_allocno_t *spilled_coalesced_allocnos)
2599 int i, num, regno;
2600 ira_allocno_t allocno;
2602 for (num = i = 0; i < n; i++)
2604 regno = pseudo_regnos[i];
2605 allocno = ira_regno_allocno_map[regno];
2606 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
2607 || ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
2608 continue;
2609 spilled_coalesced_allocnos[num++] = allocno;
2611 return num;
2614 /* Array of live ranges of size IRA_ALLOCNOS_NUM. The live range for
2615 a given slot contains the merged live ranges of the coalesced
2616 allocnos assigned to that slot. */
2617 static live_range_t *slot_coalesced_allocnos_live_ranges;
2619 /* Return TRUE if the coalesced allocnos represented by ALLOCNO have
2620 live ranges that intersect the live ranges of the coalesced
2621 allocnos assigned to the slot with number N. */
2622 static bool
2623 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
2625 ira_allocno_t a;
2627 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2628 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2630 int i;
2631 int nr = ALLOCNO_NUM_OBJECTS (a);
2632 for (i = 0; i < nr; i++)
2634 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2635 if (ira_live_ranges_intersect_p (slot_coalesced_allocnos_live_ranges[n],
2636 OBJECT_LIVE_RANGES (obj)))
2637 return true;
2639 if (a == allocno)
2640 break;
2642 return false;
2645 /* Update the live ranges of the slot to which the coalesced
2646 allocnos represented by ALLOCNO were assigned. */
2647 static void
2648 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
2650 int i, n;
2651 ira_allocno_t a;
2652 live_range_t r;
2654 n = ALLOCNO_TEMP (allocno);
2655 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2656 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2658 int nr = ALLOCNO_NUM_OBJECTS (a);
2659 for (i = 0; i < nr; i++)
2661 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2662 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
2663 slot_coalesced_allocnos_live_ranges[n]
2664 = ira_merge_live_ranges
2665 (slot_coalesced_allocnos_live_ranges[n], r);
2667 if (a == allocno)
2668 break;
2672 /* We have coalesced allocnos involved in copies. Coalesce allocnos
2673 further in order to share the same memory stack slot. Allocnos
2674 representing sets of allocnos coalesced before the call are given
2675 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
2676 some allocnos were coalesced in the function. */
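/* A sketch of the greedy scheme below: walk the spilled sets from
   most to least frequently used; each set is merged into the first
   earlier slot whose accumulated live ranges it does not intersect,
   or else it opens a new slot seeded with its own live ranges.  Sets
   crossing setjmp or having register equivalences are never
   merged.  */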
2677 static bool
2678 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
2680 int i, j, n, last_coalesced_allocno_num;
2681 ira_allocno_t allocno, a;
2682 bool merged_p = false;
2683 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
2685 slot_coalesced_allocnos_live_ranges
2686 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
2687 memset (slot_coalesced_allocnos_live_ranges, 0,
2688 sizeof (live_range_t) * ira_allocnos_num);
2689 last_coalesced_allocno_num = 0;
2690 /* Coalesce non-conflicting spilled allocnos, preferring the most
2691 frequently used ones. */
2692 for (i = 0; i < num; i++)
2694 allocno = spilled_coalesced_allocnos[i];
2695 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
2696 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
2697 || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
2698 && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
2699 || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
2700 continue;
2701 for (j = 0; j < i; j++)
2703 a = spilled_coalesced_allocnos[j];
2704 n = ALLOCNO_TEMP (a);
2705 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
2706 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
2707 && (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
2708 || (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
2709 && ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
2710 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
2711 break;
2713 if (j >= i)
2715 /* No coalescing: set up number for coalesced allocnos
2716 represented by ALLOCNO. */
2717 ALLOCNO_TEMP (allocno) = last_coalesced_allocno_num++;
2718 setup_slot_coalesced_allocno_live_ranges (allocno);
2720 else
2722 allocno_coalesced_p = true;
2723 merged_p = true;
2724 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2725 fprintf (ira_dump_file,
2726 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
2727 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
2728 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2729 ALLOCNO_TEMP (allocno) = ALLOCNO_TEMP (a);
2730 setup_slot_coalesced_allocno_live_ranges (allocno);
2731 merge_allocnos (a, allocno);
2732 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
2735 for (i = 0; i < ira_allocnos_num; i++)
2736 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
2737 ira_free (slot_coalesced_allocnos_live_ranges);
2738 return merged_p;
2741 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N
2742 for the subsequent assignment of stack slots to them in the
2743 reload pass. To do this we coalesce the spilled allocnos first
2744 to decrease the number of memory-memory move insns. This
2745 function is called by reload. */
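/* The steps below: coalesce the spilled allocnos through their
   copies, sort PSEUDO_REGNOS by the frequency of the resulting
   coalesced sets, optionally coalesce further so that
   non-overlapping sets share one stack slot, number the slots
   (recorded as -slot_num in ALLOCNO_HARD_REGNO), re-sort
   PSEUDO_REGNOS by slot number, and finally uncoalesce everything
   for reload.  */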
2746 void
2747 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
2748 unsigned int *reg_max_ref_width)
2750 int max_regno = max_reg_num ();
2751 int i, regno, num, slot_num;
2752 ira_allocno_t allocno, a;
2753 ira_allocno_iterator ai;
2754 ira_allocno_t *spilled_coalesced_allocnos;
2756 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
2757 /* Set up the allocnos that can be coalesced. */
2758 coloring_allocno_bitmap = ira_allocate_bitmap ();
2759 for (i = 0; i < n; i++)
2761 regno = pseudo_regnos[i];
2762 allocno = ira_regno_allocno_map[regno];
2763 if (allocno != NULL)
2764 bitmap_set_bit (coloring_allocno_bitmap,
2765 ALLOCNO_NUM (allocno));
2767 allocno_coalesced_p = false;
2768 coalesce_allocnos (true);
2769 ira_free_bitmap (coloring_allocno_bitmap);
2770 regno_coalesced_allocno_cost
2771 = (int *) ira_allocate (max_regno * sizeof (int));
2772 regno_coalesced_allocno_num
2773 = (int *) ira_allocate (max_regno * sizeof (int));
2774 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
2775 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
2776 /* Sort regnos according to the frequencies of the corresponding
2777 coalesced allocno sets. */
2778 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
2779 spilled_coalesced_allocnos
2780 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
2781 * sizeof (ira_allocno_t));
2782 /* Collect allocnos representing the spilled coalesced allocno
2783 sets. */
2784 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
2785 spilled_coalesced_allocnos);
2786 if (flag_ira_share_spill_slots
2787 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
2789 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
2790 qsort (pseudo_regnos, n, sizeof (int),
2791 coalesced_pseudo_reg_freq_compare);
2792 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
2793 spilled_coalesced_allocnos);
2795 ira_free_bitmap (processed_coalesced_allocno_bitmap);
2796 allocno_coalesced_p = false;
2797 /* Assign stack slot numbers to the spilled allocno sets, using
2798 smaller numbers for the most frequently used coalesced allocnos.
2799 -1 is reserved for the dynamic search of stack slots for pseudos
2800 spilled by reload. */
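/* For example, the allocnos of the set printed as "Slot 1" below end
   up with ALLOCNO_HARD_REGNO == -2, which ira_reuse_stack_slot and
   ira_mark_new_stack_slot decode as stack slot index 0 via
   -ALLOCNO_HARD_REGNO - 2, while -1 keeps meaning "spilled, slot to
   be found dynamically".  */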
2801 slot_num = 1;
2802 for (i = 0; i < num; i++)
2804 allocno = spilled_coalesced_allocnos[i];
2805 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
2806 || ALLOCNO_HARD_REGNO (allocno) >= 0
2807 || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
2808 && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
2809 || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
2810 continue;
2811 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2812 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
2813 slot_num++;
2814 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2815 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2817 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
2818 ALLOCNO_HARD_REGNO (a) = -slot_num;
2819 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2820 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
2821 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
2822 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
2823 reg_max_ref_width[ALLOCNO_REGNO (a)]));
2825 if (a == allocno)
2826 break;
2828 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2829 fprintf (ira_dump_file, "\n");
2831 ira_spilled_reg_stack_slots_num = slot_num - 1;
2832 ira_free (spilled_coalesced_allocnos);
2833 /* Sort regnos according to their slot numbers. */
2834 regno_max_ref_width = reg_max_ref_width;
2835 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
2836 /* Uncoalesce the allocnos, which is necessary for (re)assigning
2837 them during the reload pass. */
2838 FOR_EACH_ALLOCNO (a, ai)
2840 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
2841 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
2843 ira_free (regno_coalesced_allocno_num);
2844 ira_free (regno_coalesced_allocno_cost);
2849 /* This page contains code used by the reload pass to improve the
2850 final code. */
2852 /* The function is called from reload to mark changes in the
2853 allocation of REGNO made by reload. Remember that reg_renumber
2854 reflects the resulting change. */
2855 void
2856 ira_mark_allocation_change (int regno)
2858 ira_allocno_t a = ira_regno_allocno_map[regno];
2859 int old_hard_regno, hard_regno, cost;
2860 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
2862 ira_assert (a != NULL);
2863 hard_regno = reg_renumber[regno];
2864 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
2865 return;
2866 if (old_hard_regno < 0)
2867 cost = -ALLOCNO_MEMORY_COST (a);
2868 else
2870 ira_assert (ira_class_hard_reg_index[cover_class][old_hard_regno] >= 0);
2871 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
2872 ? ALLOCNO_COVER_CLASS_COST (a)
2873 : ALLOCNO_HARD_REG_COSTS (a)
2874 [ira_class_hard_reg_index[cover_class][old_hard_regno]]);
2875 update_copy_costs (a, false);
2877 ira_overall_cost -= cost;
2878 ALLOCNO_HARD_REGNO (a) = hard_regno;
2879 if (hard_regno < 0)
2881 ALLOCNO_HARD_REGNO (a) = -1;
2882 cost += ALLOCNO_MEMORY_COST (a);
2884 else if (ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
2886 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
2887 ? ALLOCNO_COVER_CLASS_COST (a)
2888 : ALLOCNO_HARD_REG_COSTS (a)
2889 [ira_class_hard_reg_index[cover_class][hard_regno]]);
2890 update_copy_costs (a, true);
2892 else
2893 /* Reload changed class of the allocno. */
2894 cost = 0;
2895 ira_overall_cost += cost;
2898 /* This function is called when reload deletes a memory-memory
2899 move. In this case we mark that the allocation of the
2900 corresponding allocnos should not be changed in the future.
2901 Otherwise we risk generating wrong code. */
2902 void
2903 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
2905 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
2906 ira_allocno_t src = ira_regno_allocno_map[src_regno];
2908 ira_assert (dst != NULL && src != NULL
2909 && ALLOCNO_HARD_REGNO (dst) < 0
2910 && ALLOCNO_HARD_REGNO (src) < 0);
2911 ALLOCNO_DONT_REASSIGN_P (dst) = true;
2912 ALLOCNO_DONT_REASSIGN_P (src) = true;
2915 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
2916 allocno A and return TRUE in the case of success. */
2917 static bool
2918 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
2920 int hard_regno;
2921 enum reg_class cover_class;
2922 int regno = ALLOCNO_REGNO (a);
2923 HARD_REG_SET saved[2];
2924 int i, n;
2926 n = ALLOCNO_NUM_OBJECTS (a);
2927 for (i = 0; i < n; i++)
2929 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2930 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2931 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
2932 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
2933 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2934 call_used_reg_set);
2936 ALLOCNO_ASSIGNED_P (a) = false;
2937 cover_class = ALLOCNO_COVER_CLASS (a);
2938 update_curr_costs (a);
2939 assign_hard_reg (a, true);
2940 hard_regno = ALLOCNO_HARD_REGNO (a);
2941 reg_renumber[regno] = hard_regno;
2942 if (hard_regno < 0)
2943 ALLOCNO_HARD_REGNO (a) = -1;
2944 else
2946 ira_assert (ira_class_hard_reg_index[cover_class][hard_regno] >= 0);
2947 ira_overall_cost -= (ALLOCNO_MEMORY_COST (a)
2948 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
2949 ? ALLOCNO_COVER_CLASS_COST (a)
2950 : ALLOCNO_HARD_REG_COSTS (a)
2951 [ira_class_hard_reg_index
2952 [cover_class][hard_regno]]));
2953 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
2954 && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
2955 call_used_reg_set))
2957 ira_assert (flag_caller_saves);
2958 caller_save_needed = 1;
2962 /* If we found a hard register, modify the RTL for the pseudo
2963 register to show the hard register, and mark the pseudo register
2964 live. */
2965 if (reg_renumber[regno] >= 0)
2967 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2968 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
2969 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
2970 mark_home_live (regno);
2972 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2973 fprintf (ira_dump_file, "\n");
2974 for (i = 0; i < n; i++)
2976 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2977 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
2979 return reg_renumber[regno] >= 0;
2982 /* Sort pseudos according to their usage frequencies (putting the
2983 most frequently used ones first). */
2984 static int
2985 pseudo_reg_compare (const void *v1p, const void *v2p)
2987 int regno1 = *(const int *) v1p;
2988 int regno2 = *(const int *) v2p;
2989 int diff;
2991 if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
2992 return diff;
2993 return regno1 - regno2;
2996 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
2997 NUM of them) or spilled pseudos conflicting with pseudos in
2998 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED if the
2999 allocation has been changed. The function doesn't use
3000 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
3001 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
3002 is called by the reload pass at the end of each reload
3003 iteration. */
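/* In outline: the set of spilled pseudos is first closed under
   conflicts (a conflicting spilled pseudo may profitably take a
   freed register instead), then sorted by usage frequency, and each
   pseudo is retried through allocno_reload_assign with its
   accumulated forbidden set.  */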
3004 bool
3005 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
3006 HARD_REG_SET bad_spill_regs,
3007 HARD_REG_SET *pseudo_forbidden_regs,
3008 HARD_REG_SET *pseudo_previous_regs,
3009 bitmap spilled)
3011 int i, n, regno;
3012 bool changed_p;
3013 ira_allocno_t a;
3014 HARD_REG_SET forbidden_regs;
3015 bitmap temp = BITMAP_ALLOC (NULL);
3017 /* Add pseudos which conflict with pseudos already in
3018 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
3019 to allocating in two steps as some of the conflicts might have
3020 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
3021 for (i = 0; i < num; i++)
3022 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
3024 for (i = 0, n = num; i < n; i++)
3026 int nr, j;
3027 int regno = spilled_pseudo_regs[i];
3028 bitmap_set_bit (temp, regno);
3030 a = ira_regno_allocno_map[regno];
3031 nr = ALLOCNO_NUM_OBJECTS (a);
3032 for (j = 0; j < nr; j++)
3034 ira_object_t conflict_obj;
3035 ira_object_t obj = ALLOCNO_OBJECT (a, j);
3036 ira_object_conflict_iterator oci;
3038 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
3040 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
3041 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
3042 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
3043 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
3045 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
3046 /* ?!? This seems wrong. */
3047 bitmap_set_bit (consideration_allocno_bitmap,
3048 ALLOCNO_NUM (conflict_a));
3054 if (num > 1)
3055 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
3056 changed_p = false;
3057 /* Try to assign hard registers to pseudos from
3058 SPILLED_PSEUDO_REGS. */
3059 for (i = 0; i < num; i++)
3061 regno = spilled_pseudo_regs[i];
3062 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
3063 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
3064 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
3065 gcc_assert (reg_renumber[regno] < 0);
3066 a = ira_regno_allocno_map[regno];
3067 ira_mark_allocation_change (regno);
3068 ira_assert (reg_renumber[regno] < 0);
3069 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
3070 fprintf (ira_dump_file,
3071 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
3072 ALLOCNO_MEMORY_COST (a)
3073 - ALLOCNO_COVER_CLASS_COST (a));
3074 allocno_reload_assign (a, forbidden_regs);
3075 if (reg_renumber[regno] >= 0)
3077 CLEAR_REGNO_REG_SET (spilled, regno);
3078 changed_p = true;
3081 BITMAP_FREE (temp);
3082 return changed_p;
3085 /* The function is called by reload and returns the already allocated
3086 stack slot (if any) for REGNO with the given INHERENT_SIZE and
3087 TOTAL_SIZE. In the case of failure to find a slot which can be
3088 used for REGNO, the function returns NULL. */
3089 rtx
3090 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
3091 unsigned int total_size)
3093 unsigned int i;
3094 int slot_num, best_slot_num;
3095 int cost, best_cost;
3096 ira_copy_t cp, next_cp;
3097 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
3098 rtx x;
3099 bitmap_iterator bi;
3100 struct ira_spilled_reg_stack_slot *slot = NULL;
3102 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
3103 && inherent_size <= total_size
3104 && ALLOCNO_HARD_REGNO (allocno) < 0);
3105 if (! flag_ira_share_spill_slots)
3106 return NULL_RTX;
3107 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
3108 if (slot_num != -1)
3110 slot = &ira_spilled_reg_stack_slots[slot_num];
3111 x = slot->mem;
3113 else
3115 best_cost = best_slot_num = -1;
3116 x = NULL_RTX;
3117 /* It means that the pseudo was spilled in the reload pass; try
3118 to reuse a slot. */
3119 for (slot_num = 0;
3120 slot_num < ira_spilled_reg_stack_slots_num;
3121 slot_num++)
3123 slot = &ira_spilled_reg_stack_slots[slot_num];
3124 if (slot->mem == NULL_RTX)
3125 continue;
3126 if (slot->width < total_size
3127 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
3128 continue;
3130 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
3131 FIRST_PSEUDO_REGISTER, i, bi)
3133 another_allocno = ira_regno_allocno_map[i];
3134 if (allocnos_have_intersected_live_ranges_p (allocno,
3135 another_allocno))
3136 goto cont;
3138 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
3139 cp != NULL;
3140 cp = next_cp)
3142 if (cp->first == allocno)
3144 next_cp = cp->next_first_allocno_copy;
3145 another_allocno = cp->second;
3147 else if (cp->second == allocno)
3149 next_cp = cp->next_second_allocno_copy;
3150 another_allocno = cp->first;
3152 else
3153 gcc_unreachable ();
3154 if (cp->insn == NULL_RTX)
3155 continue;
3156 if (bitmap_bit_p (&slot->spilled_regs,
3157 ALLOCNO_REGNO (another_allocno)))
3158 cost += cp->freq;
3160 if (cost > best_cost)
3162 best_cost = cost;
3163 best_slot_num = slot_num;
3165 cont:
3168 if (best_cost >= 0)
3170 slot_num = best_slot_num;
3171 slot = &ira_spilled_reg_stack_slots[slot_num];
3172 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
3173 x = slot->mem;
3174 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
3177 if (x != NULL_RTX)
3179 ira_assert (slot->width >= total_size);
3180 #ifdef ENABLE_IRA_CHECKING
3181 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
3182 FIRST_PSEUDO_REGISTER, i, bi)
3184 ira_assert (! pseudos_have_intersected_live_ranges_p (regno, i));
3186 #endif
3187 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
3188 if (internal_flag_ira_verbose > 3 && ira_dump_file)
3190 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
3191 regno, REG_FREQ (regno), slot_num);
3192 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
3193 FIRST_PSEUDO_REGISTER, i, bi)
3195 if ((unsigned) regno != i)
3196 fprintf (ira_dump_file, " %d", i);
3198 fprintf (ira_dump_file, "\n");
3201 return x;
3204 /* This is called by reload every time a new stack slot X with
3205 TOTAL_SIZE is allocated for REGNO. We store this info for
3206 subsequent ira_reuse_stack_slot calls. */
3207 void
3208 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
3210 struct ira_spilled_reg_stack_slot *slot;
3211 int slot_num;
3212 ira_allocno_t allocno;
3214 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
3215 allocno = ira_regno_allocno_map[regno];
3216 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
3217 if (slot_num == -1)
3219 slot_num = ira_spilled_reg_stack_slots_num++;
3220 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
3222 slot = &ira_spilled_reg_stack_slots[slot_num];
3223 INIT_REG_SET (&slot->spilled_regs);
3224 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
3225 slot->mem = x;
3226 slot->width = total_size;
3227 if (internal_flag_ira_verbose > 3 && ira_dump_file)
3228 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
3229 regno, REG_FREQ (regno), slot_num);
3233 /* Return spill cost for pseudo-registers whose numbers are in array
3234 REGNOS (with a negative number as an end marker) for reload with
3235 given IN and OUT for INSN. Also return the number of points
3236 (through EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-registers
3237 live and the register pressure is high, the number of references
3238 of the pseudo-registers (through NREFS), the number of
3239 callee-clobbered hard-registers occupied by the pseudo-registers
3240 (through CALL_USED_COUNT), and the first hard regno occupied by
3241 the pseudo-registers (through FIRST_HARD_REGNO). */
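/* As a rough illustration with hypothetical numbers: two pseudos
   with memory-vs-register savings of 400 and 250 give COST == 650;
   if additionally the reload register IN or OUT dies in INSN while
   occupying one of their hard registers, the corresponding memory
   move becomes free, so its cost times the frequency of INSN's
   block is subtracted from COST.  */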
3242 static int
3243 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
3244 int *excess_pressure_live_length,
3245 int *nrefs, int *call_used_count, int *first_hard_regno)
3247 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
3248 bool in_p, out_p;
3249 int length;
3250 ira_allocno_t a;
3252 *nrefs = 0;
3253 for (length = count = cost = i = 0;; i++)
3255 regno = regnos[i];
3256 if (regno < 0)
3257 break;
3258 *nrefs += REG_N_REFS (regno);
3259 hard_regno = reg_renumber[regno];
3260 ira_assert (hard_regno >= 0);
3261 a = ira_regno_allocno_map[regno];
3262 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
3263 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
3264 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
3265 for (j = 0; j < nregs; j++)
3266 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
3267 break;
3268 if (j == nregs)
3269 count++;
3270 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
3271 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
3272 if ((in_p || out_p)
3273 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
3275 saved_cost = 0;
3276 if (in_p)
3277 saved_cost += ira_memory_move_cost
3278 [ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][1];
3279 if (out_p)
3280 saved_cost
3281 += ira_memory_move_cost
3282 [ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][0];
3283 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
3286 *excess_pressure_live_length = length;
3287 *call_used_count = count;
3288 hard_regno = -1;
3289 if (regnos[0] >= 0)
3291 hard_regno = reg_renumber[regnos[0]];
3293 *first_hard_regno = hard_regno;
3294 return cost;
3297 /* Return TRUE if spilling pseudo-registers whose numbers are in array
3298 REGNOS is better than spilling pseudo-registers with numbers in
3299 OTHER_REGNOS for reload with given IN and OUT for INSN. The
3300 function is used by the reload pass to make better register spilling
3301 decisions. */
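/* The comparison below prefers, in this order: pseudos that are
   never referenced at all, a smaller spill cost, a longer length of
   high-register-pressure life, and as a tie-breaker either the
   register allocation order (with REG_ALLOC_ORDER) or a larger
   number of call-clobbered hard registers freed.  */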
3302 bool
3303 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
3304 rtx in, rtx out, rtx insn)
3306 int cost, other_cost;
3307 int length, other_length;
3308 int nrefs, other_nrefs;
3309 int call_used_count, other_call_used_count;
3310 int hard_regno, other_hard_regno;
3312 cost = calculate_spill_cost (regnos, in, out, insn,
3313 &length, &nrefs, &call_used_count, &hard_regno);
3314 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
3315 &other_length, &other_nrefs,
3316 &other_call_used_count,
3317 &other_hard_regno);
3318 if (nrefs == 0 && other_nrefs != 0)
3319 return true;
3320 if (nrefs != 0 && other_nrefs == 0)
3321 return false;
3322 if (cost != other_cost)
3323 return cost < other_cost;
3324 if (length != other_length)
3325 return length > other_length;
3326 #ifdef REG_ALLOC_ORDER
3327 if (hard_regno >= 0 && other_hard_regno >= 0)
3328 return (inv_reg_alloc_order[hard_regno]
3329 < inv_reg_alloc_order[other_hard_regno]);
3330 #else
3331 if (call_used_count != other_call_used_count)
3332 return call_used_count > other_call_used_count;
3333 #endif
3334 return false;
3339 /* Allocate and initialize data necessary for assign_hard_reg. */
3340 void
3341 ira_initiate_assign (void)
3343 sorted_allocnos
3344 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
3345 * ira_allocnos_num);
3346 consideration_allocno_bitmap = ira_allocate_bitmap ();
3347 initiate_cost_update ();
3348 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
3351 /* Deallocate data used by assign_hard_reg. */
3352 void
3353 ira_finish_assign (void)
3355 ira_free (sorted_allocnos);
3356 ira_free_bitmap (consideration_allocno_bitmap);
3357 finish_cost_update ();
3358 ira_free (allocno_priorities);
3363 /* Entry function doing color-based register allocation. */
3364 static void
3365 color (void)
3367 allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
3368 removed_splay_allocno_vec
3369 = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
3370 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
3371 ira_initiate_assign ();
3372 do_coloring ();
3373 ira_finish_assign ();
3374 VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
3375 VEC_free (ira_allocno_t, heap, allocno_stack_vec);
3376 move_spill_restore ();
3381 /* This page contains a simple register allocator that does not use
3382 allocno conflicts. It is used for fast allocation at -O0. */
3384 /* Do register allocation by not using allocno conflicts. It uses
3385 only allocno live ranges. The algorithm is close to Chow's
3386 priority coloring. */
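/* A sketch of the scheme below: the allocnos are ordered by the same
   priority function as the priority algorithm; each allocno then
   IORs the hard registers used at every point of its live ranges
   into a conflict set and takes the first hard register of its cover
   class that stays free over the whole range.  */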
3387 static void
3388 fast_allocation (void)
3390 int i, j, k, num, class_size, hard_regno;
3391 #ifdef STACK_REGS
3392 bool no_stack_reg_p;
3393 #endif
3394 enum reg_class cover_class;
3395 enum machine_mode mode;
3396 ira_allocno_t a;
3397 ira_allocno_iterator ai;
3398 live_range_t r;
3399 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
3401 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
3402 * ira_allocnos_num);
3403 num = 0;
3404 FOR_EACH_ALLOCNO (a, ai)
3405 sorted_allocnos[num++] = a;
3406 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
3407 setup_allocno_priorities (sorted_allocnos, num);
3408 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
3409 * ira_max_point);
3410 for (i = 0; i < ira_max_point; i++)
3411 CLEAR_HARD_REG_SET (used_hard_regs[i]);
3412 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
3413 allocno_priority_compare_func);
3414 for (i = 0; i < num; i++)
3416 int nr, l;
3418 a = sorted_allocnos[i];
3419 nr = ALLOCNO_NUM_OBJECTS (a);
3420 CLEAR_HARD_REG_SET (conflict_hard_regs);
3421 for (l = 0; l < nr; l++)
3423 ira_object_t obj = ALLOCNO_OBJECT (a, l);
3424 IOR_HARD_REG_SET (conflict_hard_regs,
3425 OBJECT_CONFLICT_HARD_REGS (obj));
3426 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3427 for (j = r->start; j <= r->finish; j++)
3428 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
3430 cover_class = ALLOCNO_COVER_CLASS (a);
3431 ALLOCNO_ASSIGNED_P (a) = true;
3432 ALLOCNO_HARD_REGNO (a) = -1;
3433 if (hard_reg_set_subset_p (reg_class_contents[cover_class],
3434 conflict_hard_regs))
3435 continue;
3436 mode = ALLOCNO_MODE (a);
3437 #ifdef STACK_REGS
3438 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
3439 #endif
3440 class_size = ira_class_hard_regs_num[cover_class];
3441 for (j = 0; j < class_size; j++)
3443 hard_regno = ira_class_hard_regs[cover_class][j];
3444 #ifdef STACK_REGS
3445 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
3446 && hard_regno <= LAST_STACK_REG)
3447 continue;
3448 #endif
3449 if (!ira_hard_reg_not_in_set_p (hard_regno, mode, conflict_hard_regs)
3450 || (TEST_HARD_REG_BIT
3451 (prohibited_class_mode_regs[cover_class][mode], hard_regno)))
3452 continue;
3453 ALLOCNO_HARD_REGNO (a) = hard_regno;
3454 for (l = 0; l < nr; l++)
3456 ira_object_t obj = ALLOCNO_OBJECT (a, l);
3457 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3458 for (k = r->start; k <= r->finish; k++)
3459 IOR_HARD_REG_SET (used_hard_regs[k],
3460 ira_reg_mode_hard_regset[hard_regno][mode]);
3462 break;
3465 ira_free (sorted_allocnos);
3466 ira_free (used_hard_regs);
3467 ira_free (allocno_priorities);
3468 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3469 ira_print_disposition (ira_dump_file);
3474 /* Entry function doing coloring. */
3475 void
3476 ira_color (void)
3478 ira_allocno_t a;
3479 ira_allocno_iterator ai;
3481 /* Setup updated costs. */
3482 FOR_EACH_ALLOCNO (a, ai)
3484 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3485 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
3487 if (ira_conflicts_p)
3488 color ();
3489 else
3490 fast_allocation ();