/* IRA allocation based on graph coloring.
   Copyright (C) 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "sbitmap.h"
32 #include "bitmap.h"
33 #include "hard-reg-set.h"
34 #include "basic-block.h"
35 #include "expr.h"
36 #include "diagnostic-core.h"
37 #include "reload.h"
38 #include "params.h"
39 #include "df.h"
40 #include "splay-tree.h"
41 #include "ira-int.h"
/* This file contains code for regional graph coloring, spill/restore
   code placement optimization, and code helping the reload pass to do
   a better job.  */

/* Bitmap of allocnos which should be colored.  */
static bitmap coloring_allocno_bitmap;
/* Bitmap of allocnos which should be taken into account during
   coloring.  In the general case it contains allocnos from
   coloring_allocno_bitmap plus other already colored conflicting
   allocnos.  */
static bitmap consideration_allocno_bitmap;
/* All allocnos sorted according to their priorities.  */
static ira_allocno_t *sorted_allocnos;

/* Vec representing the stack of allocnos used during coloring.  */
static VEC(ira_allocno_t,heap) *allocno_stack_vec;

/* Array used to choose an allocno for spilling.  */
static ira_allocno_t *allocnos_for_spilling;

/* Pool for splay tree nodes.  */
static alloc_pool splay_tree_node_pool;
/* When an allocno is removed from the splay tree, it is put in the
   following vector for subsequent reinsertion into the splay tree
   after all colorable allocnos have been put onto the stack.  The
   allocno could instead be removed from and reinserted into the splay
   tree every time its spilling priority changes, but that solution,
   although simpler, would be more costly.  */
static VEC(ira_allocno_t,heap) *removed_splay_allocno_vec;
/* Helper for qsort comparison callbacks - return a positive integer if
   X > Y, or a negative value otherwise.  Use a conditional expression
   instead of a difference computation to insulate from possible overflow
   issues, e.g. X - Y < 0 for some X > 0 and Y < 0.  */
#define SORTGT(x,y) (((x) > (y)) ? 1 : -1)
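
/* For illustration only (this note is not part of the original
   sources): with a plain difference the comparison can overflow,
   e.g. for x = INT_MAX and y = -1 the expression x - y wraps around
   and looks negative, inverting the sort order, whereas
   SORTGT (INT_MAX, -1) evaluates the predicate directly and
   yields 1.  */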
/* This page contains functions used to find conflicts using allocno
   live ranges.  */
/* Return TRUE if live ranges of allocnos A1 and A2 intersect.  It is
   used to find a conflict for new allocnos or allocnos with
   different cover classes.  */
static bool
allocnos_have_intersected_live_ranges_p (ira_allocno_t a1, ira_allocno_t a2)
{
  int i, j;
  int n1 = ALLOCNO_NUM_OBJECTS (a1);
  int n2 = ALLOCNO_NUM_OBJECTS (a2);

  if (a1 == a2)
    return false;
  if (ALLOCNO_REG (a1) != NULL && ALLOCNO_REG (a2) != NULL
      && (ORIGINAL_REGNO (ALLOCNO_REG (a1))
          == ORIGINAL_REGNO (ALLOCNO_REG (a2))))
    return false;

  for (i = 0; i < n1; i++)
    {
      ira_object_t c1 = ALLOCNO_OBJECT (a1, i);
      for (j = 0; j < n2; j++)
        {
          ira_object_t c2 = ALLOCNO_OBJECT (a2, j);
          if (ira_live_ranges_intersect_p (OBJECT_LIVE_RANGES (c1),
                                           OBJECT_LIVE_RANGES (c2)))
            return true;
        }
    }
  return false;
}
#ifdef ENABLE_IRA_CHECKING

/* Return TRUE if live ranges of pseudo-registers REGNO1 and REGNO2
   intersect.  This should be used when there is only one region.
   Currently this is used during reload.  */
static bool
pseudos_have_intersected_live_ranges_p (int regno1, int regno2)
{
  ira_allocno_t a1, a2;

  ira_assert (regno1 >= FIRST_PSEUDO_REGISTER
              && regno2 >= FIRST_PSEUDO_REGISTER);
  /* Reg info calculated by the dataflow infrastructure can differ
     from that calculated by regclass.  */
  if ((a1 = ira_loop_tree_root->regno_allocno_map[regno1]) == NULL
      || (a2 = ira_loop_tree_root->regno_allocno_map[regno2]) == NULL)
    return false;
  return allocnos_have_intersected_live_ranges_p (a1, a2);
}

#endif
/* This page contains functions used to choose hard registers for
   allocnos.  */

/* Array whose element value is TRUE if the corresponding hard
   register was already allocated for an allocno.  */
static bool allocated_hardreg_p[FIRST_PSEUDO_REGISTER];
/* Describes one element in a queue of allocnos whose costs need to be
   updated.  Each allocno in the queue is known to have a cover class.  */
struct update_cost_queue_elem
{
  /* This element is in the queue iff CHECK == update_cost_check.  */
  int check;

  /* COST_HOP_DIVISOR**N, where N is the length of the shortest path
     connecting this allocno to the one being allocated.  */
  int divisor;

  /* The next allocno in the queue, or null if this is the last element.  */
  ira_allocno_t next;
};
/* The first element in a queue of allocnos whose copy costs need to be
   updated.  Null if the queue is empty.  */
static ira_allocno_t update_cost_queue;

/* The last element in the queue described by update_cost_queue.
   Not valid if update_cost_queue is null.  */
static struct update_cost_queue_elem *update_cost_queue_tail;

/* A pool of elements in the queue described by update_cost_queue.
   Elements are indexed by ALLOCNO_NUM.  */
static struct update_cost_queue_elem *update_cost_queue_elems;

/* The current value of the update_copy_costs call count.  */
static int update_cost_check;
/* Allocate and initialize data necessary for function
   update_copy_costs.  */
static void
initiate_cost_update (void)
{
  size_t size;

  size = ira_allocnos_num * sizeof (struct update_cost_queue_elem);
  update_cost_queue_elems
    = (struct update_cost_queue_elem *) ira_allocate (size);
  memset (update_cost_queue_elems, 0, size);
  update_cost_check = 0;
}

/* Deallocate data used by function update_copy_costs.  */
static void
finish_cost_update (void)
{
  ira_free (update_cost_queue_elems);
}
/* When we traverse allocnos to update hard register costs, the cost
   divisor will be multiplied by the following macro value for each
   hop from the given allocno to directly connected allocnos.  */
#define COST_HOP_DIVISOR 4
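
/* A worked example (illustrative only, not from the original
   sources): in update_copy_costs below, copies of the just-assigned
   allocno are applied with divisor 1, so a copy of frequency 100 and
   move cost 4 contributes 400; allocnos one hop further get divisor 4
   (contribution 100), then 16 (25), then 64 (6).  The integer
   division quickly damps updates along long copy chains.  */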
/* Start a new cost-updating pass.  */
static void
start_update_cost (void)
{
  update_cost_check++;
  update_cost_queue = NULL;
}
/* Add (ALLOCNO, DIVISOR) to the end of update_cost_queue,
   unless ALLOCNO is already in the queue, or has no cover class.  */
static inline void
queue_update_cost (ira_allocno_t allocno, int divisor)
{
  struct update_cost_queue_elem *elem;

  elem = &update_cost_queue_elems[ALLOCNO_NUM (allocno)];
  if (elem->check != update_cost_check
      && ALLOCNO_COVER_CLASS (allocno) != NO_REGS)
    {
      elem->check = update_cost_check;
      elem->divisor = divisor;
      elem->next = NULL;
      if (update_cost_queue == NULL)
        update_cost_queue = allocno;
      else
        update_cost_queue_tail->next = allocno;
      update_cost_queue_tail = elem;
    }
}
/* Try to remove the first element from update_cost_queue.  Return false
   if the queue was empty, otherwise make (*ALLOCNO, *DIVISOR) describe
   the removed element.  */
static inline bool
get_next_update_cost (ira_allocno_t *allocno, int *divisor)
{
  struct update_cost_queue_elem *elem;

  if (update_cost_queue == NULL)
    return false;

  *allocno = update_cost_queue;
  elem = &update_cost_queue_elems[ALLOCNO_NUM (*allocno)];
  *divisor = elem->divisor;
  update_cost_queue = elem->next;
  return true;
}
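
/* The three functions above are used in a simple breadth-first
   pattern; a minimal sketch of a cost-updating pass (illustrative
   only):

     start_update_cost ();
     queue_update_cost (a, COST_HOP_DIVISOR);
     while (get_next_update_cost (&a, &divisor))
       {
         ... adjust costs around A, queueing each copy neighbor
             with DIVISOR * COST_HOP_DIVISOR ...
       }

   Bumping the CHECK stamp in start_update_cost makes the statically
   allocated queue elements reusable across passes without clearing
   the whole array.  */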
/* Update the cost of allocnos to increase chances to remove some
   copies as the result of subsequent assignment.  */
static void
update_copy_costs (ira_allocno_t allocno, bool decr_p)
{
  int i, cost, update_cost, hard_regno, divisor;
  enum machine_mode mode;
  enum reg_class rclass, cover_class;
  ira_allocno_t another_allocno;
  ira_copy_t cp, next_cp;

  hard_regno = ALLOCNO_HARD_REGNO (allocno);
  ira_assert (hard_regno >= 0);

  cover_class = ALLOCNO_COVER_CLASS (allocno);
  if (cover_class == NO_REGS)
    return;
  i = ira_class_hard_reg_index[cover_class][hard_regno];
  ira_assert (i >= 0);
  rclass = REGNO_REG_CLASS (hard_regno);

  start_update_cost ();
  divisor = 1;
  do
    {
      mode = ALLOCNO_MODE (allocno);
      for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
        {
          if (cp->first == allocno)
            {
              next_cp = cp->next_first_allocno_copy;
              another_allocno = cp->second;
            }
          else if (cp->second == allocno)
            {
              next_cp = cp->next_second_allocno_copy;
              another_allocno = cp->first;
            }
          else
            gcc_unreachable ();

          cover_class = ALLOCNO_COVER_CLASS (another_allocno);
          if (! ira_reg_classes_intersect_p[rclass][cover_class]
              || ALLOCNO_ASSIGNED_P (another_allocno))
            continue;

          cost = (cp->second == allocno
                  ? ira_get_register_move_cost (mode, rclass, cover_class)
                  : ira_get_register_move_cost (mode, cover_class, rclass));
          if (decr_p)
            cost = -cost;

          update_cost = cp->freq * cost / divisor;
          if (update_cost == 0)
            continue;

          ira_allocate_and_set_or_copy_costs
            (&ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno), cover_class,
             ALLOCNO_UPDATED_COVER_CLASS_COST (another_allocno),
             ALLOCNO_HARD_REG_COSTS (another_allocno));
          ira_allocate_and_set_or_copy_costs
            (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
             cover_class, 0,
             ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
          i = ira_class_hard_reg_index[cover_class][hard_regno];
          ira_assert (i >= 0);
          ALLOCNO_UPDATED_HARD_REG_COSTS (another_allocno)[i] += update_cost;
          ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno)[i]
            += update_cost;

          queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
        }
    }
  while (get_next_update_cost (&allocno, &divisor));
}
/* This function updates COSTS (decrease if DECR_P) for hard_registers
   of COVER_CLASS by conflict costs of the unassigned allocnos
   connected by copies with allocnos in update_cost_queue.  This
   update increases chances to remove some copies.  */
static void
update_conflict_hard_regno_costs (int *costs, enum reg_class cover_class,
                                  bool decr_p)
{
  int i, cost, class_size, freq, mult, div, divisor;
  int index, hard_regno;
  int *conflict_costs;
  bool cont_p;
  enum reg_class another_cover_class;
  ira_allocno_t allocno, another_allocno;
  ira_copy_t cp, next_cp;

  while (get_next_update_cost (&allocno, &divisor))
    for (cp = ALLOCNO_COPIES (allocno); cp != NULL; cp = next_cp)
      {
        if (cp->first == allocno)
          {
            next_cp = cp->next_first_allocno_copy;
            another_allocno = cp->second;
          }
        else if (cp->second == allocno)
          {
            next_cp = cp->next_second_allocno_copy;
            another_allocno = cp->first;
          }
        else
          gcc_unreachable ();

        another_cover_class = ALLOCNO_COVER_CLASS (another_allocno);
        if (! ira_reg_classes_intersect_p[cover_class][another_cover_class]
            || ALLOCNO_ASSIGNED_P (another_allocno)
            || ALLOCNO_MAY_BE_SPILLED_P (another_allocno))
          continue;

        class_size = ira_class_hard_regs_num[another_cover_class];
        ira_allocate_and_copy_costs
          (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno),
           another_cover_class,
           ALLOCNO_CONFLICT_HARD_REG_COSTS (another_allocno));
        conflict_costs
          = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (another_allocno);
        if (conflict_costs == NULL)
          cont_p = true;
        else
          {
            mult = cp->freq;
            freq = ALLOCNO_FREQ (another_allocno);
            if (freq == 0)
              freq = 1;
            div = freq * divisor;
            cont_p = false;
            for (i = class_size - 1; i >= 0; i--)
              {
                hard_regno = ira_class_hard_regs[another_cover_class][i];
                ira_assert (hard_regno >= 0);
                index = ira_class_hard_reg_index[cover_class][hard_regno];
                if (index < 0)
                  continue;
                cost = conflict_costs[i] * mult / div;
                if (cost == 0)
                  continue;
                cont_p = true;
                if (decr_p)
                  cost = -cost;
                costs[index] += cost;
              }
          }
        /* Probably 5 hops will be enough.  */
        if (cont_p
            && divisor <= (COST_HOP_DIVISOR
                           * COST_HOP_DIVISOR
                           * COST_HOP_DIVISOR
                           * COST_HOP_DIVISOR))
          queue_update_cost (another_allocno, divisor * COST_HOP_DIVISOR);
      }
}
/* Choose a hard register for allocno A.  If RETRY_P is TRUE, it means
   that the function is called from `ira_reassign_conflict_allocnos'
   or `allocno_reload_assign'.  */
static bool
assign_hard_reg (ira_allocno_t a, bool retry_p)
{
  HARD_REG_SET conflicting_regs[2];
  int i, j, hard_regno, nregs, best_hard_regno, class_size;
  int cost, mem_cost, min_cost, full_cost, min_full_cost, nwords, word;
  int *a_costs;
  enum reg_class cover_class;
  enum machine_mode mode;
  static int costs[FIRST_PSEUDO_REGISTER], full_costs[FIRST_PSEUDO_REGISTER];
#ifndef HONOR_REG_ALLOC_ORDER
  enum reg_class rclass;
  int add_cost;
#endif
#ifdef STACK_REGS
  bool no_stack_reg_p;
#endif

  nwords = ALLOCNO_NUM_OBJECTS (a);
  ira_assert (! ALLOCNO_ASSIGNED_P (a));
  cover_class = ALLOCNO_COVER_CLASS (a);
  class_size = ira_class_hard_regs_num[cover_class];
  mode = ALLOCNO_MODE (a);
  for (i = 0; i < nwords; i++)
    CLEAR_HARD_REG_SET (conflicting_regs[i]);
  best_hard_regno = -1;
  mem_cost = 0;
  memset (costs, 0, sizeof (int) * class_size);
  memset (full_costs, 0, sizeof (int) * class_size);
#ifdef STACK_REGS
  no_stack_reg_p = false;
#endif
  start_update_cost ();
  mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
444 start_update_cost ();
445 mem_cost += ALLOCNO_UPDATED_MEMORY_COST (a);
447 ira_allocate_and_copy_costs (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
448 cover_class, ALLOCNO_HARD_REG_COSTS (a));
449 a_costs = ALLOCNO_UPDATED_HARD_REG_COSTS (a);
450 #ifdef STACK_REGS
451 no_stack_reg_p = no_stack_reg_p || ALLOCNO_TOTAL_NO_STACK_REG_P (a);
452 #endif
453 cost = ALLOCNO_UPDATED_COVER_CLASS_COST (a);
454 for (i = 0; i < class_size; i++)
455 if (a_costs != NULL)
457 costs[i] += a_costs[i];
458 full_costs[i] += a_costs[i];
460 else
462 costs[i] += cost;
463 full_costs[i] += cost;
  for (word = 0; word < nwords; word++)
    {
      ira_object_t conflict_obj;
      ira_object_t obj = ALLOCNO_OBJECT (a, word);
      ira_object_conflict_iterator oci;

      IOR_HARD_REG_SET (conflicting_regs[word],
                        OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
      /* Take preferences of conflicting allocnos into account.  */
      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
        {
          ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
          enum reg_class conflict_cover_class;

          /* Reload can give another class so we need to check all
             allocnos.  */
          if (!retry_p && !bitmap_bit_p (consideration_allocno_bitmap,
                                         ALLOCNO_NUM (conflict_a)))
            continue;
          conflict_cover_class = ALLOCNO_COVER_CLASS (conflict_a);
          ira_assert (ira_reg_classes_intersect_p
                      [cover_class][conflict_cover_class]);
          if (ALLOCNO_ASSIGNED_P (conflict_a))
            {
              hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
              if (hard_regno >= 0
                  && ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
                {
                  enum machine_mode mode = ALLOCNO_MODE (conflict_a);
                  int conflict_nregs = hard_regno_nregs[hard_regno][mode];
                  int n_objects = ALLOCNO_NUM_OBJECTS (conflict_a);

                  if (conflict_nregs == n_objects && conflict_nregs > 1)
                    {
                      int num = OBJECT_SUBWORD (conflict_obj);

                      if (WORDS_BIG_ENDIAN)
                        SET_HARD_REG_BIT (conflicting_regs[word],
                                          hard_regno + n_objects - num - 1);
                      else
                        SET_HARD_REG_BIT (conflicting_regs[word],
                                          hard_regno + num);
                    }
                  else
                    IOR_HARD_REG_SET
                      (conflicting_regs[word],
                       ira_reg_mode_hard_regset[hard_regno][mode]);
                  if (hard_reg_set_subset_p (reg_class_contents[cover_class],
                                             conflicting_regs[word]))
                    goto fail;
                }
            }
          else if (! ALLOCNO_MAY_BE_SPILLED_P (conflict_a))
            {
              int k, *conflict_costs;

              ira_allocate_and_copy_costs
                (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a),
                 conflict_cover_class,
                 ALLOCNO_CONFLICT_HARD_REG_COSTS (conflict_a));
              conflict_costs
                = ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (conflict_a);
              if (conflict_costs != NULL)
                for (j = class_size - 1; j >= 0; j--)
                  {
                    hard_regno = ira_class_hard_regs[cover_class][j];
                    ira_assert (hard_regno >= 0);
                    k = (ira_class_hard_reg_index
                         [conflict_cover_class][hard_regno]);
                    if (k < 0)
                      continue;
                    full_costs[j] -= conflict_costs[k];
                  }
              queue_update_cost (conflict_a, COST_HOP_DIVISOR);
            }
        }
    }
  /* Take into account preferences of allocnos connected by copies to
     the conflict allocnos.  */
  update_conflict_hard_regno_costs (full_costs, cover_class, true);

  /* Take preferences of allocnos connected by copies into
     account.  */
  start_update_cost ();
  queue_update_cost (a, COST_HOP_DIVISOR);
  update_conflict_hard_regno_costs (full_costs, cover_class, false);
  min_cost = min_full_cost = INT_MAX;

  /* We don't care about giving callee saved registers to allocnos not
     living through calls because call clobbered registers are
     allocated first (it is the usual practice to put them first in
     REG_ALLOC_ORDER).  */
  for (i = 0; i < class_size; i++)
    {
      hard_regno = ira_class_hard_regs[cover_class][i];
      nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
#ifdef STACK_REGS
      if (no_stack_reg_p
          && FIRST_STACK_REG <= hard_regno && hard_regno <= LAST_STACK_REG)
        continue;
#endif
      if (TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
                             hard_regno))
        continue;
      for (j = 0; j < nregs; j++)
        {
          int k;
          int set_to_test_start = 0, set_to_test_end = nwords;

          if (nregs == nwords)
            {
              if (WORDS_BIG_ENDIAN)
                set_to_test_start = nwords - j - 1;
              else
                set_to_test_start = j;
              set_to_test_end = set_to_test_start + 1;
            }
          for (k = set_to_test_start; k < set_to_test_end; k++)
            if (TEST_HARD_REG_BIT (conflicting_regs[k], hard_regno + j))
              break;
          if (k != set_to_test_end)
            break;
        }
      if (j != nregs)
        continue;
      cost = costs[i];
      full_cost = full_costs[i];
#ifndef HONOR_REG_ALLOC_ORDER
      if (! allocated_hardreg_p[hard_regno]
          && ira_hard_reg_not_in_set_p (hard_regno, mode, call_used_reg_set))
        /* We need to save/restore the hard register in
           epilogue/prologue.  Therefore we increase the cost.  */
        {
          /* ??? If only part is call clobbered.  */
          rclass = REGNO_REG_CLASS (hard_regno);
          add_cost = (ira_memory_move_cost[mode][rclass][0]
                      + ira_memory_move_cost[mode][rclass][1] - 1);
          cost += add_cost;
          full_cost += add_cost;
        }
#endif
      if (min_cost > cost)
        min_cost = cost;
      if (min_full_cost > full_cost)
        {
          min_full_cost = full_cost;
          best_hard_regno = hard_regno;
          ira_assert (hard_regno >= 0);
        }
    }
  if (min_full_cost > mem_cost)
    {
      if (! retry_p && internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        fprintf (ira_dump_file, "(memory is more profitable %d vs %d) ",
                 mem_cost, min_full_cost);
      best_hard_regno = -1;
    }
 fail:
  if (best_hard_regno >= 0)
    allocated_hardreg_p[best_hard_regno] = true;
  ALLOCNO_HARD_REGNO (a) = best_hard_regno;
  ALLOCNO_ASSIGNED_P (a) = true;
  if (best_hard_regno >= 0)
    update_copy_costs (a, true);
  /* We don't need updated costs anymore.  */
  ira_free_allocno_updated_costs (a);
  return best_hard_regno >= 0;
}
/* This page contains the allocator based on the Chaitin-Briggs algorithm.  */
/* Bucket of allocnos that can be colored currently without
   spilling.  */
static ira_allocno_t colorable_allocno_bucket;

/* Bucket of allocnos that might not be colorable currently without
   spilling.  */
static ira_allocno_t uncolorable_allocno_bucket;

/* Each element of the array contains the current number of allocnos
   of given *cover* class in the uncolorable_bucket.  */
static int uncolorable_allocnos_num[N_REG_CLASSES];
/* Return the current spill priority of allocno A.  The smaller the
   number, the more preferable the allocno is for spilling.  */
static int
allocno_spill_priority (ira_allocno_t a)
{
  return (ALLOCNO_TEMP (a)
          / (ALLOCNO_LEFT_CONFLICTS_SIZE (a)
             * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]
             + 1));
}
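
/* An illustrative evaluation (numbers invented for the example): for
   an allocno with spill cost ALLOCNO_TEMP = 2000, 4 registers' worth
   of remaining conflicts and a mode occupying 2 registers of its
   cover class, the priority is 2000 / (4 * 2 + 1) = 222.  A cheaper
   spill cost or more constrained neighbors yield a smaller value,
   making the allocno a better spill candidate.  */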
/* Add ALLOCNO to bucket *BUCKET_PTR.  ALLOCNO should not be in a
   bucket before the call.  */
static void
add_allocno_to_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
  ira_allocno_t first_allocno;
  enum reg_class cover_class;

  if (bucket_ptr == &uncolorable_allocno_bucket
      && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
    {
      uncolorable_allocnos_num[cover_class]++;
      ira_assert (uncolorable_allocnos_num[cover_class] > 0);
    }
  first_allocno = *bucket_ptr;
  ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno) = first_allocno;
  ALLOCNO_PREV_BUCKET_ALLOCNO (allocno) = NULL;
  if (first_allocno != NULL)
    ALLOCNO_PREV_BUCKET_ALLOCNO (first_allocno) = allocno;
  *bucket_ptr = allocno;
}
/* Compare two allocnos to define which allocno should be pushed first
   into the coloring stack.  If the return value is negative, the
   allocno given by the first parameter will be pushed first.  In this
   case such allocno has less priority than the second one and the
   hard register will be assigned to it after assignment to the second
   one.  As a result of such an assignment order, the second allocno
   has a better chance to get the best hard register.  */
static int
bucket_allocno_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  int diff, a1_freq, a2_freq, a1_num, a2_num;

  if ((diff = (int) ALLOCNO_COVER_CLASS (a2) - ALLOCNO_COVER_CLASS (a1)) != 0)
    return diff;
  a1_freq = ALLOCNO_FREQ (a1);
  a1_num = ALLOCNO_AVAILABLE_REGS_NUM (a1);
  a2_freq = ALLOCNO_FREQ (a2);
  a2_num = ALLOCNO_AVAILABLE_REGS_NUM (a2);
  if ((diff = a2_num - a1_num) != 0)
    return diff;
  else if ((diff = a1_freq - a2_freq) != 0)
    return diff;
  return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
}
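
/* For example (illustrative): within one cover class, an allocno A1
   with 8 available registers compares less than an allocno A2 with
   only 2 (a2_num - a1_num < 0), so A1 is pushed onto the coloring
   stack first and is popped, i.e. assigned, after A2; the more
   constrained allocno therefore gets the first pick of hard
   registers.  */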
/* Sort bucket *BUCKET_PTR and return the result through
   BUCKET_PTR.  */
static void
sort_bucket (ira_allocno_t *bucket_ptr)
{
  ira_allocno_t a, head;
  int n;

  for (n = 0, a = *bucket_ptr; a != NULL; a = ALLOCNO_NEXT_BUCKET_ALLOCNO (a))
    sorted_allocnos[n++] = a;
  if (n <= 1)
    return;
  qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
         bucket_allocno_compare_func);
  head = NULL;
  for (n--; n >= 0; n--)
    {
      a = sorted_allocnos[n];
      ALLOCNO_NEXT_BUCKET_ALLOCNO (a) = head;
      ALLOCNO_PREV_BUCKET_ALLOCNO (a) = NULL;
      if (head != NULL)
        ALLOCNO_PREV_BUCKET_ALLOCNO (head) = a;
      head = a;
    }
  *bucket_ptr = head;
}
/* Add ALLOCNO to bucket *BUCKET_PTR, maintaining the order according
   to their priority.  ALLOCNO should not be in a bucket before the
   call.  */
static void
add_allocno_to_ordered_bucket (ira_allocno_t allocno,
                               ira_allocno_t *bucket_ptr)
{
  ira_allocno_t before, after;
  enum reg_class cover_class;

  if (bucket_ptr == &uncolorable_allocno_bucket
      && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
    {
      uncolorable_allocnos_num[cover_class]++;
      ira_assert (uncolorable_allocnos_num[cover_class] > 0);
    }
  for (before = *bucket_ptr, after = NULL;
       before != NULL;
       after = before, before = ALLOCNO_NEXT_BUCKET_ALLOCNO (before))
    if (bucket_allocno_compare_func (&allocno, &before) < 0)
      break;
  ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno) = before;
  ALLOCNO_PREV_BUCKET_ALLOCNO (allocno) = after;
  if (after == NULL)
    *bucket_ptr = allocno;
  else
    ALLOCNO_NEXT_BUCKET_ALLOCNO (after) = allocno;
  if (before != NULL)
    ALLOCNO_PREV_BUCKET_ALLOCNO (before) = allocno;
}
/* Delete ALLOCNO from bucket *BUCKET_PTR.  It should be there before
   the call.  */
static void
delete_allocno_from_bucket (ira_allocno_t allocno, ira_allocno_t *bucket_ptr)
{
  ira_allocno_t prev_allocno, next_allocno;
  enum reg_class cover_class;

  if (bucket_ptr == &uncolorable_allocno_bucket
      && (cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
    {
      uncolorable_allocnos_num[cover_class]--;
      ira_assert (uncolorable_allocnos_num[cover_class] >= 0);
    }
  prev_allocno = ALLOCNO_PREV_BUCKET_ALLOCNO (allocno);
  next_allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno);
  if (prev_allocno != NULL)
    ALLOCNO_NEXT_BUCKET_ALLOCNO (prev_allocno) = next_allocno;
  else
    {
      ira_assert (*bucket_ptr == allocno);
      *bucket_ptr = next_allocno;
    }
  if (next_allocno != NULL)
    ALLOCNO_PREV_BUCKET_ALLOCNO (next_allocno) = prev_allocno;
}
/* Splay tree for each cover class.  The trees are indexed by the
   corresponding cover classes.  Splay trees contain uncolorable
   allocnos.  */
static splay_tree uncolorable_allocnos_splay_tree[N_REG_CLASSES];

/* If the following macro is TRUE, the splay tree is used to choose an
   allocno of the corresponding cover class for spilling.  When the
   number of uncolorable allocnos of a given cover class decreases to
   some threshold, linear array search is used to find the best
   allocno for spilling.  This threshold is actually pretty big
   because, although splay trees are asymptotically much faster, each
   splay tree operation is quite costly, especially taking cache
   locality into account.  */
#define USE_SPLAY_P(CLASS) (uncolorable_allocnos_num[CLASS] > 4000)
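
/* Illustratively: a cover class with 10000 uncolorable allocnos is
   managed through its splay tree, so the minimum spill priority can
   be extracted in O(log n), while a class with a few hundred allocnos
   falls back to the linear scan in push_allocnos_to_stack below.  */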
/* Put allocno A onto the coloring stack without removing it from its
   bucket.  Pushing allocno to the coloring stack can result in moving
   conflicting allocnos from the uncolorable bucket to the colorable
   one.  */
static void
push_allocno_to_stack (ira_allocno_t a)
{
  int size;
  enum reg_class cover_class;
  int i, n = ALLOCNO_NUM_OBJECTS (a);

  ALLOCNO_IN_GRAPH_P (a) = false;
  VEC_safe_push (ira_allocno_t, heap, allocno_stack_vec, a);
  cover_class = ALLOCNO_COVER_CLASS (a);
  if (cover_class == NO_REGS)
    return;
  size = ira_reg_class_nregs[cover_class][ALLOCNO_MODE (a)];
  if (ALLOCNO_NUM_OBJECTS (a) > 1)
    {
      /* We will deal with the subwords individually.  */
      gcc_assert (size == ALLOCNO_NUM_OBJECTS (a));
      size = 1;
    }
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      int conflict_size;
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
        {
          ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
          int left_conflicts_size;

          if (!bitmap_bit_p (coloring_allocno_bitmap,
                             ALLOCNO_NUM (conflict_a)))
            continue;

          ira_assert (cover_class
                      == ALLOCNO_COVER_CLASS (conflict_a));
          if (!ALLOCNO_IN_GRAPH_P (conflict_a)
              || ALLOCNO_ASSIGNED_P (conflict_a))
            continue;

          left_conflicts_size = ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_a);
          conflict_size
            = (ira_reg_class_nregs
               [cover_class][ALLOCNO_MODE (conflict_a)]);
          ira_assert (left_conflicts_size >= size);
          if (left_conflicts_size + conflict_size
              <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_a))
            {
              ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_a) -= size;
              continue;
            }
          left_conflicts_size -= size;
          if (uncolorable_allocnos_splay_tree[cover_class] != NULL
              && !ALLOCNO_SPLAY_REMOVED_P (conflict_a)
              && USE_SPLAY_P (cover_class))
            {
              ira_assert
                (splay_tree_lookup
                 (uncolorable_allocnos_splay_tree[cover_class],
                  (splay_tree_key) conflict_a) != NULL);
              splay_tree_remove
                (uncolorable_allocnos_splay_tree[cover_class],
                 (splay_tree_key) conflict_a);
              ALLOCNO_SPLAY_REMOVED_P (conflict_a) = true;
              VEC_safe_push (ira_allocno_t, heap,
                             removed_splay_allocno_vec,
                             conflict_a);
            }
          ALLOCNO_LEFT_CONFLICTS_SIZE (conflict_a)
            = left_conflicts_size;
          if (left_conflicts_size + conflict_size
              <= ALLOCNO_AVAILABLE_REGS_NUM (conflict_a))
            {
              delete_allocno_from_bucket
                (conflict_a, &uncolorable_allocno_bucket);
              add_allocno_to_ordered_bucket
                (conflict_a, &colorable_allocno_bucket);
            }
        }
    }
}
/* Put ALLOCNO onto the coloring stack and remove it from its bucket.
   The allocno is in the colorable bucket if COLORABLE_P is TRUE.  */
static void
remove_allocno_from_bucket_and_push (ira_allocno_t allocno, bool colorable_p)
{
  enum reg_class cover_class;

  if (colorable_p)
    delete_allocno_from_bucket (allocno, &colorable_allocno_bucket);
  else
    delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "      Pushing");
      ira_print_expanded_allocno (allocno);
      if (colorable_p)
        fprintf (ira_dump_file, "\n");
      else
        fprintf (ira_dump_file, "(potential spill: %spri=%d, cost=%d)\n",
                 ALLOCNO_BAD_SPILL_P (allocno) ? "bad spill, " : "",
                 allocno_spill_priority (allocno), ALLOCNO_TEMP (allocno));
    }
  cover_class = ALLOCNO_COVER_CLASS (allocno);
  ira_assert ((colorable_p
               && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                   + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
                   <= ALLOCNO_AVAILABLE_REGS_NUM (allocno)))
              || (! colorable_p
                  && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                      + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
                                                         (allocno)]
                      > ALLOCNO_AVAILABLE_REGS_NUM (allocno))));
  if (! colorable_p)
    ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
  push_allocno_to_stack (allocno);
}
/* Put all allocnos from the colorable bucket onto the coloring
   stack.  */
static void
push_only_colorable (void)
{
  sort_bucket (&colorable_allocno_bucket);
  while (colorable_allocno_bucket != NULL)
    remove_allocno_from_bucket_and_push (colorable_allocno_bucket, true);
}
/* Put ALLOCNO, chosen for potential spilling, onto the coloring
   stack.  */
static void
push_allocno_to_spill (ira_allocno_t allocno)
{
  delete_allocno_from_bucket (allocno, &uncolorable_allocno_bucket);
  ALLOCNO_MAY_BE_SPILLED_P (allocno) = true;
  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "      Pushing p%d(%d) (spill for NO_REGS)\n",
             ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno));
  push_allocno_to_stack (allocno);
}
/* Return the frequency of exit edges (if EXIT_P) or entry edges
   from/to the loop given by its LOOP_NODE.  If REGNO >= 0, count only
   the edges on which pseudo-register REGNO is live across the
   edge.  */
int
ira_loop_edge_freq (ira_loop_tree_node_t loop_node, int regno, bool exit_p)
{
  int freq, i;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;

  ira_assert (loop_node->loop != NULL
              && (regno < 0 || regno >= FIRST_PSEUDO_REGISTER));
  freq = 0;
  if (! exit_p)
    {
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
        if (e->src != loop_node->loop->latch
            && (regno < 0
                || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
                    && bitmap_bit_p (DF_LR_IN (e->dest), regno))))
          freq += EDGE_FREQUENCY (e);
    }
  else
    {
      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edge, edges, i, e)
        if (regno < 0
            || (bitmap_bit_p (DF_LR_OUT (e->src), regno)
                && bitmap_bit_p (DF_LR_IN (e->dest), regno)))
          freq += EDGE_FREQUENCY (e);
      VEC_free (edge, heap, edges);
    }

  return REG_FREQ_FROM_EDGE_FREQ (freq);
}
/* Calculate and return the cost of putting allocno A into memory.  */
static int
calculate_allocno_spill_cost (ira_allocno_t a)
{
  int regno, cost;
  enum machine_mode mode;
  enum reg_class rclass;
  ira_allocno_t parent_allocno;
  ira_loop_tree_node_t parent_node, loop_node;

  regno = ALLOCNO_REGNO (a);
  cost = ALLOCNO_UPDATED_MEMORY_COST (a) - ALLOCNO_UPDATED_COVER_CLASS_COST (a);
  if (ALLOCNO_CAP (a) != NULL)
    return cost;
  loop_node = ALLOCNO_LOOP_TREE_NODE (a);
  if ((parent_node = loop_node->parent) == NULL)
    return cost;
  if ((parent_allocno = parent_node->regno_allocno_map[regno]) == NULL)
    return cost;
  mode = ALLOCNO_MODE (a);
  rclass = ALLOCNO_COVER_CLASS (a);
  if (ALLOCNO_HARD_REGNO (parent_allocno) < 0)
    cost -= (ira_memory_move_cost[mode][rclass][0]
             * ira_loop_edge_freq (loop_node, regno, true)
             + ira_memory_move_cost[mode][rclass][1]
             * ira_loop_edge_freq (loop_node, regno, false));
  else
    cost += ((ira_memory_move_cost[mode][rclass][1]
              * ira_loop_edge_freq (loop_node, regno, true)
              + ira_memory_move_cost[mode][rclass][0]
              * ira_loop_edge_freq (loop_node, regno, false))
             - (ira_get_register_move_cost (mode, rclass, rclass)
                * (ira_loop_edge_freq (loop_node, regno, false)
                   + ira_loop_edge_freq (loop_node, regno, true))));
  return cost;
}
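
/* A sketch with invented numbers: suppose the loop is entered and
   left 10 times each (as weighted by ira_loop_edge_freq), the memory
   move costs for the class are 6 (into memory) and 4 (out of memory),
   and a register-register move costs 2.  If the parent allocno got a
   hard register, spilling A inside the loop needs a memory move on
   every entry and exit, 6*10 + 4*10 = 100, versus 2 * (10 + 10) = 40
   for register shuffles, so COST grows by 60 and A becomes a worse
   spill candidate.  */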
/* Compare keys in the splay tree used to choose the best allocno for
   spilling.  The best allocno has the minimal key.  */
static int
allocno_spill_priority_compare (splay_tree_key k1, splay_tree_key k2)
{
  int pri1, pri2, diff;
  ira_allocno_t a1 = (ira_allocno_t) k1, a2 = (ira_allocno_t) k2;

  pri1 = (ALLOCNO_TEMP (a1)
          / (ALLOCNO_LEFT_CONFLICTS_SIZE (a1)
             * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a1)][ALLOCNO_MODE (a1)]
             + 1));
  pri2 = (ALLOCNO_TEMP (a2)
          / (ALLOCNO_LEFT_CONFLICTS_SIZE (a2)
             * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a2)][ALLOCNO_MODE (a2)]
             + 1));
  if ((diff = pri1 - pri2) != 0)
    return diff;
  if ((diff = ALLOCNO_TEMP (a1) - ALLOCNO_TEMP (a2)) != 0)
    return diff;
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
/* Allocate data of SIZE for the splay trees.  We allocate only splay
   tree roots or splay tree nodes.  If you change this, please rewrite
   the function.  */
static void *
splay_tree_allocate (int size, void *data ATTRIBUTE_UNUSED)
{
  if (size != sizeof (struct splay_tree_node_s))
    return ira_allocate (size);
  return pool_alloc (splay_tree_node_pool);
}

/* Free data NODE for the splay trees.  We allocate and free only splay
   tree roots or splay tree nodes.  If you change this, please rewrite
   the function.  */
static void
splay_tree_free (void *node, void *data ATTRIBUTE_UNUSED)
{
  int i;
  enum reg_class cover_class;

  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      if (node == uncolorable_allocnos_splay_tree[cover_class])
        {
          ira_free (node);
          return;
        }
    }
  pool_free (splay_tree_node_pool, node);
}
/* Push allocnos to the coloring stack.  The order of allocnos in the
   stack defines the order for the subsequent coloring.  */
static void
push_allocnos_to_stack (void)
{
  ira_allocno_t allocno, i_allocno, *allocno_vec;
  enum reg_class cover_class, rclass;
  int allocno_pri, i_allocno_pri, allocno_cost, i_allocno_cost;
  int i, j, num, cover_class_allocnos_num[N_REG_CLASSES];
  ira_allocno_t *cover_class_allocnos[N_REG_CLASSES];
  int cost;

  /* Initialize.  */
  VEC_truncate (ira_allocno_t, removed_splay_allocno_vec, 0);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      cover_class_allocnos_num[cover_class] = 0;
      cover_class_allocnos[cover_class] = NULL;
      uncolorable_allocnos_splay_tree[cover_class] = NULL;
    }
  /* Calculate uncolorable allocno spill costs.  */
  for (allocno = uncolorable_allocno_bucket;
       allocno != NULL;
       allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno))
    if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
      {
        cover_class_allocnos_num[cover_class]++;
        cost = calculate_allocno_spill_cost (allocno);
        ALLOCNO_TEMP (allocno) = cost;
      }
  /* Define place where to put uncolorable allocnos of the same cover
     class.  */
  for (num = i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      ira_assert (cover_class_allocnos_num[cover_class]
                  == uncolorable_allocnos_num[cover_class]);
      if (cover_class_allocnos_num[cover_class] != 0)
        {
          cover_class_allocnos[cover_class] = allocnos_for_spilling + num;
          num += cover_class_allocnos_num[cover_class];
          cover_class_allocnos_num[cover_class] = 0;
        }
      if (USE_SPLAY_P (cover_class))
        uncolorable_allocnos_splay_tree[cover_class]
          = splay_tree_new_with_allocator (allocno_spill_priority_compare,
                                           NULL, NULL, splay_tree_allocate,
                                           splay_tree_free, NULL);
    }
  ira_assert (num <= ira_allocnos_num);
  /* Collect uncolorable allocnos of each cover class.  */
  for (allocno = uncolorable_allocno_bucket;
       allocno != NULL;
       allocno = ALLOCNO_NEXT_BUCKET_ALLOCNO (allocno))
    if ((cover_class = ALLOCNO_COVER_CLASS (allocno)) != NO_REGS)
      {
        cover_class_allocnos
          [cover_class][cover_class_allocnos_num[cover_class]++] = allocno;
        if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
          splay_tree_insert (uncolorable_allocnos_splay_tree[cover_class],
                             (splay_tree_key) allocno,
                             (splay_tree_value) allocno);
      }
  for (;;)
    {
      push_only_colorable ();
      allocno = uncolorable_allocno_bucket;
      if (allocno == NULL)
        break;
      cover_class = ALLOCNO_COVER_CLASS (allocno);
      if (cover_class == NO_REGS)
        {
          push_allocno_to_spill (allocno);
          continue;
        }
      /* Potential spilling.  */
      ira_assert
        (ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)] > 0);
      if (USE_SPLAY_P (cover_class))
        {
          while (VEC_length (ira_allocno_t, removed_splay_allocno_vec) != 0)
            {
              allocno = VEC_pop (ira_allocno_t, removed_splay_allocno_vec);
              ALLOCNO_SPLAY_REMOVED_P (allocno) = false;
              rclass = ALLOCNO_COVER_CLASS (allocno);
              if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                  + ira_reg_class_nregs[rclass][ALLOCNO_MODE (allocno)]
                  > ALLOCNO_AVAILABLE_REGS_NUM (allocno))
                splay_tree_insert
                  (uncolorable_allocnos_splay_tree[rclass],
                   (splay_tree_key) allocno, (splay_tree_value) allocno);
            }
          allocno = ((ira_allocno_t)
                     splay_tree_min
                     (uncolorable_allocnos_splay_tree[cover_class])->key);
          splay_tree_remove (uncolorable_allocnos_splay_tree[cover_class],
                             (splay_tree_key) allocno);
        }
      else
        {
          num = cover_class_allocnos_num[cover_class];
          ira_assert (num > 0);
          allocno_vec = cover_class_allocnos[cover_class];
          allocno = NULL;
          allocno_pri = allocno_cost = 0;
          /* Scan the uncolorable allocnos to find the one with the
             lowest spill cost.  */
          for (i = 0, j = num - 1; i <= j;)
            {
              i_allocno = allocno_vec[i];
              if (! ALLOCNO_IN_GRAPH_P (i_allocno)
                  && ALLOCNO_IN_GRAPH_P (allocno_vec[j]))
                {
                  i_allocno = allocno_vec[j];
                  allocno_vec[j] = allocno_vec[i];
                  allocno_vec[i] = i_allocno;
                }
              if (ALLOCNO_IN_GRAPH_P (i_allocno))
                {
                  i++;
                  ira_assert (ALLOCNO_TEMP (i_allocno) != INT_MAX);
                  i_allocno_cost = ALLOCNO_TEMP (i_allocno);
                  i_allocno_pri = allocno_spill_priority (i_allocno);
                  if (allocno == NULL
                      || (! ALLOCNO_BAD_SPILL_P (i_allocno)
                          && ALLOCNO_BAD_SPILL_P (allocno))
                      || (! (ALLOCNO_BAD_SPILL_P (i_allocno)
                             && ! ALLOCNO_BAD_SPILL_P (allocno))
                          && (allocno_pri > i_allocno_pri
                              || (allocno_pri == i_allocno_pri
                                  && (allocno_cost > i_allocno_cost
                                      || (allocno_cost == i_allocno_cost
                                          && (ALLOCNO_NUM (allocno)
                                              > ALLOCNO_NUM (i_allocno))))))))
                    {
                      allocno = i_allocno;
                      allocno_cost = i_allocno_cost;
                      allocno_pri = i_allocno_pri;
                    }
                }
              if (! ALLOCNO_IN_GRAPH_P (allocno_vec[j]))
                j--;
            }
          ira_assert (allocno != NULL && j >= 0);
          cover_class_allocnos_num[cover_class] = j + 1;
        }
      ira_assert (ALLOCNO_IN_GRAPH_P (allocno)
                  && ALLOCNO_COVER_CLASS (allocno) == cover_class
                  && (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
                      + ira_reg_class_nregs[cover_class][ALLOCNO_MODE
                                                         (allocno)]
                      > ALLOCNO_AVAILABLE_REGS_NUM (allocno)));
      remove_allocno_from_bucket_and_push (allocno, false);
    }
  ira_assert (colorable_allocno_bucket == NULL
              && uncolorable_allocno_bucket == NULL);
  for (i = 0; i < ira_reg_class_cover_size; i++)
    {
      cover_class = ira_reg_class_cover[i];
      ira_assert (uncolorable_allocnos_num[cover_class] == 0);
      if (uncolorable_allocnos_splay_tree[cover_class] != NULL)
        splay_tree_delete (uncolorable_allocnos_splay_tree[cover_class]);
    }
}
/* Pop the coloring stack and assign hard registers to the popped
   allocnos.  */
static void
pop_allocnos_from_stack (void)
{
  ira_allocno_t allocno;
  enum reg_class cover_class;

  while (VEC_length (ira_allocno_t, allocno_stack_vec) != 0)
    {
      allocno = VEC_pop (ira_allocno_t, allocno_stack_vec);
      cover_class = ALLOCNO_COVER_CLASS (allocno);
      if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
        {
          fprintf (ira_dump_file, "      Popping");
          ira_print_expanded_allocno (allocno);
          fprintf (ira_dump_file, "  -- ");
        }
      if (cover_class == NO_REGS)
        {
          ALLOCNO_HARD_REGNO (allocno) = -1;
          ALLOCNO_ASSIGNED_P (allocno) = true;
          ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (allocno) == NULL);
          ira_assert
            (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (allocno) == NULL);
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "assign memory\n");
        }
      else if (assign_hard_reg (allocno, false))
        {
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "assign reg %d\n",
                     ALLOCNO_HARD_REGNO (allocno));
        }
      else if (ALLOCNO_ASSIGNED_P (allocno))
        {
          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "spill\n");
        }
      ALLOCNO_IN_GRAPH_P (allocno) = true;
    }
}
/* Loop over all subobjects of allocno A, collecting total hard
   register conflicts in PSET (which the caller must initialize).  */
static void
all_conflicting_hard_regs (ira_allocno_t a, HARD_REG_SET *pset)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (a);

  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);

      IOR_HARD_REG_SET (*pset, OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
    }
}
/* Set up the number of available hard registers for allocno A.  */
static void
setup_allocno_available_regs_num (ira_allocno_t a)
{
  int i, n, hard_regs_num, hard_regno;
  enum machine_mode mode;
  enum reg_class cover_class;
  HARD_REG_SET temp_set;

  cover_class = ALLOCNO_COVER_CLASS (a);
  ALLOCNO_AVAILABLE_REGS_NUM (a) = ira_available_class_regs[cover_class];
  if (cover_class == NO_REGS)
    return;
  CLEAR_HARD_REG_SET (temp_set);
  ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
  hard_regs_num = ira_class_hard_regs_num[cover_class];
  all_conflicting_hard_regs (a, &temp_set);

  mode = ALLOCNO_MODE (a);
  for (n = 0, i = hard_regs_num - 1; i >= 0; i--)
    {
      hard_regno = ira_class_hard_regs[cover_class][i];
      if (TEST_HARD_REG_BIT (temp_set, hard_regno)
          || TEST_HARD_REG_BIT (prohibited_class_mode_regs[cover_class][mode],
                                hard_regno))
        n++;
    }
  if (internal_flag_ira_verbose > 2 && n > 0 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "    Reg %d of %s has %d regs less\n",
             ALLOCNO_REGNO (a), reg_class_names[cover_class], n);
  ALLOCNO_AVAILABLE_REGS_NUM (a) -= n;
}
/* Set up ALLOCNO_LEFT_CONFLICTS_SIZE for allocno A.  */
static void
setup_allocno_left_conflicts_size (ira_allocno_t a)
{
  int i, hard_regs_num, hard_regno, conflict_allocnos_size;
  enum reg_class cover_class;
  HARD_REG_SET temp_set;

  cover_class = ALLOCNO_COVER_CLASS (a);
  hard_regs_num = ira_class_hard_regs_num[cover_class];
  CLEAR_HARD_REG_SET (temp_set);
  ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
  all_conflicting_hard_regs (a, &temp_set);

  AND_HARD_REG_SET (temp_set, reg_class_contents[cover_class]);
  AND_COMPL_HARD_REG_SET (temp_set, ira_no_alloc_regs);

  conflict_allocnos_size = 0;
  if (! hard_reg_set_empty_p (temp_set))
    for (i = 0; i < (int) hard_regs_num; i++)
      {
        hard_regno = ira_class_hard_regs[cover_class][i];
        if (TEST_HARD_REG_BIT (temp_set, hard_regno))
          {
            conflict_allocnos_size++;
            CLEAR_HARD_REG_BIT (temp_set, hard_regno);
            if (hard_reg_set_empty_p (temp_set))
              break;
          }
      }
  CLEAR_HARD_REG_SET (temp_set);
  if (cover_class != NO_REGS)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, i);
          ira_object_t conflict_obj;
          ira_object_conflict_iterator oci;

          FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
            {
              ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);

              ira_assert (cover_class
                          == ALLOCNO_COVER_CLASS (conflict_a));
              if (! ALLOCNO_ASSIGNED_P (conflict_a))
                conflict_allocnos_size
                  += (ira_reg_class_nregs
                      [cover_class][ALLOCNO_MODE (conflict_a)]);
              else if ((hard_regno = ALLOCNO_HARD_REGNO (conflict_a))
                       >= 0)
                {
                  int last = (hard_regno
                              + hard_regno_nregs
                                [hard_regno][ALLOCNO_MODE (conflict_a)]);

                  while (hard_regno < last)
                    {
                      if (! TEST_HARD_REG_BIT (temp_set, hard_regno))
                        {
                          conflict_allocnos_size++;
                          SET_HARD_REG_BIT (temp_set, hard_regno);
                        }
                      hard_regno++;
                    }
                }
            }
        }
    }
  ALLOCNO_LEFT_CONFLICTS_SIZE (a) = conflict_allocnos_size;
}
/* Put ALLOCNO in a bucket corresponding to the number and size of its
   conflicting allocnos and hard registers.  */
static void
put_allocno_into_bucket (ira_allocno_t allocno)
{
  enum reg_class cover_class;

  cover_class = ALLOCNO_COVER_CLASS (allocno);
  ALLOCNO_IN_GRAPH_P (allocno) = true;
  setup_allocno_left_conflicts_size (allocno);
  setup_allocno_available_regs_num (allocno);
  if (ALLOCNO_LEFT_CONFLICTS_SIZE (allocno)
      + ira_reg_class_nregs[cover_class][ALLOCNO_MODE (allocno)]
      <= ALLOCNO_AVAILABLE_REGS_NUM (allocno))
    add_allocno_to_bucket (allocno, &colorable_allocno_bucket);
  else
    add_allocno_to_bucket (allocno, &uncolorable_allocno_bucket);
}
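
/* For instance (illustrative numbers): an allocno whose remaining
   conflicts can occupy at most 5 registers, which itself needs 1
   register and has 8 available, satisfies 5 + 1 <= 8 and is trivially
   colorable whatever its neighbors get; with only 4 available
   registers the test fails and it lands in the uncolorable bucket as
   a potential spill candidate.  */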
/* Map: allocno number -> allocno priority.  */
static int *allocno_priorities;

/* Set up priorities for N allocnos in array
   CONSIDERATION_ALLOCNOS.  */
static void
setup_allocno_priorities (ira_allocno_t *consideration_allocnos, int n)
{
  int i, length, nrefs, priority, max_priority, mult;
  ira_allocno_t a;

  max_priority = 0;
  for (i = 0; i < n; i++)
    {
      a = consideration_allocnos[i];
      nrefs = ALLOCNO_NREFS (a);
      ira_assert (nrefs >= 0);
      mult = floor_log2 (ALLOCNO_NREFS (a)) + 1;
      ira_assert (mult >= 0);
      allocno_priorities[ALLOCNO_NUM (a)]
        = priority
        = (mult
           * (ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a))
           * ira_reg_class_nregs[ALLOCNO_COVER_CLASS (a)][ALLOCNO_MODE (a)]);
      if (priority < 0)
        priority = -priority;
      if (max_priority < priority)
        max_priority = priority;
    }
  mult = max_priority == 0 ? 1 : INT_MAX / max_priority;
  for (i = 0; i < n; i++)
    {
      a = consideration_allocnos[i];
      length = ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      if (ALLOCNO_NUM_OBJECTS (a) > 1)
        length /= ALLOCNO_NUM_OBJECTS (a);
      if (length <= 0)
        length = 1;
      allocno_priorities[ALLOCNO_NUM (a)]
        = allocno_priorities[ALLOCNO_NUM (a)] * mult / length;
    }
}
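
/* A worked example (invented numbers): an allocno with 7 references,
   memory cost 2000, cover class cost 400 and a one-register mode gets
   priority = (floor_log2 (7) + 1) * (2000 - 400) * 1 = 3 * 1600 = 4800
   in the first pass; the second pass rescales all priorities by
   INT_MAX / max_priority and divides by the number of program points
   with excess register pressure, so allocnos whose live ranges cross
   few high-pressure points end up with the highest priorities.  */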
/* Sort allocnos according to their priorities, which are calculated
   analogously to the ones in file `global.c'.  */
static int
allocno_priority_compare_func (const void *v1p, const void *v2p)
{
  ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
  ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
  int pri1, pri2;

  pri1 = allocno_priorities[ALLOCNO_NUM (a1)];
  pri2 = allocno_priorities[ALLOCNO_NUM (a2)];
  if (pri2 != pri1)
    return SORTGT (pri2, pri1);

  /* If regs are equally good, sort by allocnos, so that the results of
     qsort leave nothing to chance.  */
  return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
}
/* Chaitin-Briggs coloring for allocnos in COLORING_ALLOCNO_BITMAP
   taking into account allocnos in CONSIDERATION_ALLOCNO_BITMAP.  */
static void
color_allocnos (void)
{
  unsigned int i, n;
  bitmap_iterator bi;
  ira_allocno_t a;

  if (flag_ira_algorithm == IRA_ALGORITHM_PRIORITY)
    {
      n = 0;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
        {
          a = ira_allocnos[i];
          if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
            {
              ALLOCNO_HARD_REGNO (a) = -1;
              ALLOCNO_ASSIGNED_P (a) = true;
              ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
              ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf (ira_dump_file, "      Spill");
                  ira_print_expanded_allocno (a);
                  fprintf (ira_dump_file, "\n");
                }
              continue;
            }
          sorted_allocnos[n++] = a;
        }
      if (n != 0)
        {
          setup_allocno_priorities (sorted_allocnos, n);
          qsort (sorted_allocnos, n, sizeof (ira_allocno_t),
                 allocno_priority_compare_func);
          for (i = 0; i < n; i++)
            {
              a = sorted_allocnos[i];
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf (ira_dump_file, "      ");
                  ira_print_expanded_allocno (a);
                  fprintf (ira_dump_file, "  -- ");
                }
              if (assign_hard_reg (a, false))
                {
                  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                    fprintf (ira_dump_file, "assign hard reg %d\n",
                             ALLOCNO_HARD_REGNO (a));
                }
              else
                {
                  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                    fprintf (ira_dump_file, "assign memory\n");
                }
            }
        }
    }
  else
    {
      /* Put the allocnos into the corresponding buckets.  */
      colorable_allocno_bucket = NULL;
      uncolorable_allocno_bucket = NULL;
      EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, i, bi)
        {
          a = ira_allocnos[i];
          if (ALLOCNO_COVER_CLASS (a) == NO_REGS)
            {
              ALLOCNO_HARD_REGNO (a) = -1;
              ALLOCNO_ASSIGNED_P (a) = true;
              ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
              ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
              if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
                {
                  fprintf (ira_dump_file, "      Spill");
                  ira_print_expanded_allocno (a);
                  fprintf (ira_dump_file, "\n");
                }
              continue;
            }
          put_allocno_into_bucket (a);
        }
      push_allocnos_to_stack ();
      pop_allocnos_from_stack ();
    }
}
/* Output information about the loop given by its LOOP_TREE_NODE.  */
static void
print_loop_title (ira_loop_tree_node_t loop_tree_node)
{
  unsigned int j;
  bitmap_iterator bi;
  ira_loop_tree_node_t subloop_node, dest_loop_node;
  edge e;
  edge_iterator ei;

  ira_assert (loop_tree_node->loop != NULL);
  fprintf (ira_dump_file,
           "\n  Loop %d (parent %d, header bb%d, depth %d)\n    bbs:",
           loop_tree_node->loop->num,
           (loop_tree_node->parent == NULL
            ? -1 : loop_tree_node->parent->loop->num),
           loop_tree_node->loop->header->index,
           loop_depth (loop_tree_node->loop));
  for (subloop_node = loop_tree_node->children;
       subloop_node != NULL;
       subloop_node = subloop_node->next)
    if (subloop_node->bb != NULL)
      {
        fprintf (ira_dump_file, " %d", subloop_node->bb->index);
        FOR_EACH_EDGE (e, ei, subloop_node->bb->succs)
          if (e->dest != EXIT_BLOCK_PTR
              && ((dest_loop_node = IRA_BB_NODE (e->dest)->parent)
                  != loop_tree_node))
            fprintf (ira_dump_file, "(->%d:l%d)",
                     e->dest->index, dest_loop_node->loop->num);
      }
  fprintf (ira_dump_file, "\n    all:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n    modified regnos:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->modified_regnos, 0, j, bi)
    fprintf (ira_dump_file, " %d", j);
  fprintf (ira_dump_file, "\n    border:");
  EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->border_allocnos, 0, j, bi)
    fprintf (ira_dump_file, " %dr%d", j, ALLOCNO_REGNO (ira_allocnos[j]));
  fprintf (ira_dump_file, "\n    Pressure:");
  for (j = 0; (int) j < ira_reg_class_cover_size; j++)
    {
      enum reg_class cover_class;

      cover_class = ira_reg_class_cover[j];
      if (loop_tree_node->reg_pressure[cover_class] == 0)
        continue;
      fprintf (ira_dump_file, " %s=%d", reg_class_names[cover_class],
               loop_tree_node->reg_pressure[cover_class]);
    }
  fprintf (ira_dump_file, "\n");
}
/* Color the allocnos inside a loop (in the extreme case it can be all
   of the function) given the corresponding LOOP_TREE_NODE.  The
   function is called for each loop during top-down traversal of the
   loop tree.  */
static void
color_pass (ira_loop_tree_node_t loop_tree_node)
{
  int regno, hard_regno, index = -1;
  int cost, exit_freq, enter_freq;
  unsigned int j;
  bitmap_iterator bi;
  enum machine_mode mode;
  enum reg_class rclass, cover_class;
  ira_allocno_t a, subloop_allocno;
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_tree_node->bb == NULL);
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    print_loop_title (loop_tree_node);

  bitmap_copy (coloring_allocno_bitmap, loop_tree_node->all_allocnos);
  bitmap_copy (consideration_allocno_bitmap, coloring_allocno_bitmap);
  EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
    {
      a = ira_allocnos[j];
      if (ALLOCNO_ASSIGNED_P (a))
        bitmap_clear_bit (coloring_allocno_bitmap, ALLOCNO_NUM (a));
    }
  /* Color all mentioned allocnos including transparent ones.  */
  color_allocnos ();
  /* Process caps.  They are processed just once.  */
  if (flag_ira_region == IRA_REGION_MIXED
      || flag_ira_region == IRA_REGION_ALL)
    EXECUTE_IF_SET_IN_BITMAP (loop_tree_node->all_allocnos, 0, j, bi)
      {
        a = ira_allocnos[j];
        if (ALLOCNO_CAP_MEMBER (a) == NULL)
          continue;
        /* Remove from processing in the next loop.  */
        bitmap_clear_bit (consideration_allocno_bitmap, j);
        rclass = ALLOCNO_COVER_CLASS (a);
        if (flag_ira_region == IRA_REGION_MIXED
            && (loop_tree_node->reg_pressure[rclass]
                <= ira_available_class_regs[rclass]))
          {
            mode = ALLOCNO_MODE (a);
            hard_regno = ALLOCNO_HARD_REGNO (a);
            if (hard_regno >= 0)
              {
                index = ira_class_hard_reg_index[rclass][hard_regno];
                ira_assert (index >= 0);
              }
            regno = ALLOCNO_REGNO (a);
            subloop_allocno = ALLOCNO_CAP_MEMBER (a);
            subloop_node = ALLOCNO_LOOP_TREE_NODE (subloop_allocno);
            ira_assert (!ALLOCNO_ASSIGNED_P (subloop_allocno));
            ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
            ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
            if (hard_regno >= 0)
              update_copy_costs (subloop_allocno, true);
            /* We don't need updated costs anymore.  */
            ira_free_allocno_updated_costs (subloop_allocno);
          }
      }
1704 /* Update costs of the corresponding allocnos (not caps) in the
1705 subloops. */
1706 for (subloop_node = loop_tree_node->subloops;
1707 subloop_node != NULL;
1708 subloop_node = subloop_node->subloop_next)
1710 ira_assert (subloop_node->bb == NULL);
1711 EXECUTE_IF_SET_IN_BITMAP (consideration_allocno_bitmap, 0, j, bi)
1713 a = ira_allocnos[j];
1714 ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
1715 mode = ALLOCNO_MODE (a);
1716 rclass = ALLOCNO_COVER_CLASS (a);
1717 hard_regno = ALLOCNO_HARD_REGNO (a);
1718 /* Use hard register class here. ??? */
1719 if (hard_regno >= 0)
1721 index = ira_class_hard_reg_index[rclass][hard_regno];
1722 ira_assert (index >= 0);
1724 regno = ALLOCNO_REGNO (a);
1725 /* ??? conflict costs */
1726 subloop_allocno = subloop_node->regno_allocno_map[regno];
1727 if (subloop_allocno == NULL
1728 || ALLOCNO_CAP (subloop_allocno) != NULL)
1729 continue;
1730 ira_assert (ALLOCNO_COVER_CLASS (subloop_allocno) == rclass);
1731 ira_assert (bitmap_bit_p (subloop_node->all_allocnos,
1732 ALLOCNO_NUM (subloop_allocno)));
1733 if ((flag_ira_region == IRA_REGION_MIXED)
1734 && (loop_tree_node->reg_pressure[rclass]
1735 <= ira_available_class_regs[rclass]))
1737 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
1739 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
1740 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
1741 if (hard_regno >= 0)
1742 update_copy_costs (subloop_allocno, true);
1743 /* We don't need updated costs anymore: */
1744 ira_free_allocno_updated_costs (subloop_allocno);
1746 continue;
1748 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
1749 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
1750 ira_assert (regno < ira_reg_equiv_len);
1751 if (ira_reg_equiv_invariant_p[regno]
1752 || ira_reg_equiv_const[regno] != NULL_RTX)
1754 if (! ALLOCNO_ASSIGNED_P (subloop_allocno))
1756 ALLOCNO_HARD_REGNO (subloop_allocno) = hard_regno;
1757 ALLOCNO_ASSIGNED_P (subloop_allocno) = true;
1758 if (hard_regno >= 0)
1759 update_copy_costs (subloop_allocno, true);
1760 /* We don't need updated costs anymore: */
1761 ira_free_allocno_updated_costs (subloop_allocno);
1764 else if (hard_regno < 0)
1766 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
1767 -= ((ira_memory_move_cost[mode][rclass][1] * enter_freq)
1768 + (ira_memory_move_cost[mode][rclass][0] * exit_freq));
1770 else
1772 cover_class = ALLOCNO_COVER_CLASS (subloop_allocno);
1773 cost = (ira_get_register_move_cost (mode, rclass, rclass)
1774 * (exit_freq + enter_freq));
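/* COST estimates the border moves saved if the subloop allocno gets
   the same hard register.  Illustrative numbers only: with
   enter_freq = 3, exit_freq = 2 and a register move cost of 4, each
   cost entry for HARD_REGNO below is decreased by 4 * (2 + 3) = 20.  */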
1775 ira_allocate_and_set_or_copy_costs
1776 (&ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno), cover_class,
1777 ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno),
1778 ALLOCNO_HARD_REG_COSTS (subloop_allocno));
1779 ira_allocate_and_set_or_copy_costs
1780 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno),
1781 cover_class, 0,
1782 ALLOCNO_CONFLICT_HARD_REG_COSTS (subloop_allocno));
1783 ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index] -= cost;
1784 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (subloop_allocno)[index]
1785 -= cost;
1786 if (ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
1787 > ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index])
1788 ALLOCNO_UPDATED_COVER_CLASS_COST (subloop_allocno)
1789 = ALLOCNO_UPDATED_HARD_REG_COSTS (subloop_allocno)[index];
1790 ALLOCNO_UPDATED_MEMORY_COST (subloop_allocno)
1791 += (ira_memory_move_cost[mode][rclass][0] * enter_freq
1792 + ira_memory_move_cost[mode][rclass][1] * exit_freq);
1798 /* Initialize the common data for coloring and call functions to do
1799 Chaitin-Briggs and regional coloring. */
1800 static void
1801 do_coloring (void)
1803 coloring_allocno_bitmap = ira_allocate_bitmap ();
1804 allocnos_for_spilling
1805 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
1806 * ira_allocnos_num);
1807 splay_tree_node_pool = create_alloc_pool ("splay tree nodes",
1808 sizeof (struct splay_tree_node_s),
1809 100);
1810 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
1811 fprintf (ira_dump_file, "\n**** Allocnos coloring:\n\n");
1813 ira_traverse_loop_tree (false, ira_loop_tree_root, color_pass, NULL);
1815 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1816 ira_print_disposition (ira_dump_file);
1818 free_alloc_pool (splay_tree_node_pool);
1819 ira_free_bitmap (coloring_allocno_bitmap);
1820 ira_free (allocnos_for_spilling);
1825 /* Move spill/restore code, which is to be generated in ira-emit.c,
1826 to less frequent points (if it is profitable) by reassigning some
1827 allocnos (in loops that have subloops and are themselves contained
1828 in another loop) to memory, which results in longer live ranges
1829 where the corresponding pseudo-registers will be in memory. */
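/* In more detail: for each allocno that received a hard register and
   whose loop has subloops, the loop below computes the cost balance
   of keeping the register versus spilling the allocno in the whole
   loop, weighting memory- and register-move costs on the loop
   borders by the border frequencies.  A negative balance means
   spilling is cheaper, and the hard register is taken away.  */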
1830 static void
1831 move_spill_restore (void)
1833 int cost, regno, hard_regno, hard_regno2, index;
1834 bool changed_p;
1835 int enter_freq, exit_freq;
1836 enum machine_mode mode;
1837 enum reg_class rclass;
1838 ira_allocno_t a, parent_allocno, subloop_allocno;
1839 ira_loop_tree_node_t parent, loop_node, subloop_node;
1840 ira_allocno_iterator ai;
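/* Iterate to a fixed point: moving one allocno to memory can change
   the cost balance for allocnos in the enclosing loops, so the scan
   is repeated until no change occurs.  */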
1842 for (;;)
1844 changed_p = false;
1845 if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
1846 fprintf (ira_dump_file, "New iteration of spill/restore move\n");
1847 FOR_EACH_ALLOCNO (a, ai)
1849 regno = ALLOCNO_REGNO (a);
1850 loop_node = ALLOCNO_LOOP_TREE_NODE (a);
1851 if (ALLOCNO_CAP_MEMBER (a) != NULL
1852 || ALLOCNO_CAP (a) != NULL
1853 || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0
1854 || loop_node->children == NULL
1855 /* Don't do the optimization because it can create
1856 copies, and the reload pass can then spill the allocno
1857 set by such a copy even though the allocno will not
1858 get a memory slot. */
1859 || ira_reg_equiv_invariant_p[regno]
1860 || ira_reg_equiv_const[regno] != NULL_RTX
1861 || !bitmap_bit_p (loop_node->border_allocnos, ALLOCNO_NUM (a)))
1862 continue;
1863 mode = ALLOCNO_MODE (a);
1864 rclass = ALLOCNO_COVER_CLASS (a);
1865 index = ira_class_hard_reg_index[rclass][hard_regno];
1866 ira_assert (index >= 0);
1867 cost = (ALLOCNO_MEMORY_COST (a)
1868 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
1869 ? ALLOCNO_COVER_CLASS_COST (a)
1870 : ALLOCNO_HARD_REG_COSTS (a)[index]));
1871 for (subloop_node = loop_node->subloops;
1872 subloop_node != NULL;
1873 subloop_node = subloop_node->subloop_next)
1875 ira_assert (subloop_node->bb == NULL);
1876 subloop_allocno = subloop_node->regno_allocno_map[regno];
1877 if (subloop_allocno == NULL)
1878 continue;
1879 ira_assert (rclass == ALLOCNO_COVER_CLASS (subloop_allocno));
1880 /* We have the accumulated cost. To get the real cost
1881 of the allocno's usage in the loop we should subtract
1882 the costs of the subloop allocnos. */
1883 cost -= (ALLOCNO_MEMORY_COST (subloop_allocno)
1884 - (ALLOCNO_HARD_REG_COSTS (subloop_allocno) == NULL
1885 ? ALLOCNO_COVER_CLASS_COST (subloop_allocno)
1886 : ALLOCNO_HARD_REG_COSTS (subloop_allocno)[index]));
1887 exit_freq = ira_loop_edge_freq (subloop_node, regno, true);
1888 enter_freq = ira_loop_edge_freq (subloop_node, regno, false);
1889 if ((hard_regno2 = ALLOCNO_HARD_REGNO (subloop_allocno)) < 0)
1890 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
1891 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
1892 else
1894 cost
1895 += (ira_memory_move_cost[mode][rclass][0] * exit_freq
1896 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
1897 if (hard_regno2 != hard_regno)
1898 cost -= (ira_get_register_move_cost (mode, rclass, rclass)
1899 * (exit_freq + enter_freq));
1902 if ((parent = loop_node->parent) != NULL
1903 && (parent_allocno = parent->regno_allocno_map[regno]) != NULL)
1905 ira_assert (rclass == ALLOCNO_COVER_CLASS (parent_allocno));
1906 exit_freq = ira_loop_edge_freq (loop_node, regno, true);
1907 enter_freq = ira_loop_edge_freq (loop_node, regno, false);
1908 if ((hard_regno2 = ALLOCNO_HARD_REGNO (parent_allocno)) < 0)
1909 cost -= (ira_memory_move_cost[mode][rclass][0] * exit_freq
1910 + ira_memory_move_cost[mode][rclass][1] * enter_freq);
1911 else
1913 cost
1914 += (ira_memory_move_cost[mode][rclass][1] * exit_freq
1915 + ira_memory_move_cost[mode][rclass][0] * enter_freq);
1916 if (hard_regno2 != hard_regno)
1917 cost -= (ira_get_register_move_cost (mode, rclass, rclass)
1918 * (exit_freq + enter_freq));
1921 if (cost < 0)
1923 ALLOCNO_HARD_REGNO (a) = -1;
1924 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
1926 fprintf
1927 (ira_dump_file,
1928 " Moving spill/restore for a%dr%d up from loop %d",
1929 ALLOCNO_NUM (a), regno, loop_node->loop->num);
1930 fprintf (ira_dump_file, " - profit %d\n", -cost);
1932 changed_p = true;
1935 if (! changed_p)
1936 break;
1942 /* Update current hard reg costs and current conflict hard reg costs
1943 for allocno A. This is done by processing its copies that involve
1944 other allocnos which have already been assigned. */
1945 static void
1946 update_curr_costs (ira_allocno_t a)
1948 int i, hard_regno, cost;
1949 enum machine_mode mode;
1950 enum reg_class cover_class, rclass;
1951 ira_allocno_t another_a;
1952 ira_copy_t cp, next_cp;
1954 ira_free_allocno_updated_costs (a);
1955 ira_assert (! ALLOCNO_ASSIGNED_P (a));
1956 cover_class = ALLOCNO_COVER_CLASS (a);
1957 if (cover_class == NO_REGS)
1958 return;
1959 mode = ALLOCNO_MODE (a);
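/* Walk all copies involving A.  Each copy connecting A to an allocno
   that already received a hard register makes that register cheaper
   for A by the copy frequency times the register move cost.  */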
1960 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1962 if (cp->first == a)
1964 next_cp = cp->next_first_allocno_copy;
1965 another_a = cp->second;
1967 else if (cp->second == a)
1969 next_cp = cp->next_second_allocno_copy;
1970 another_a = cp->first;
1972 else
1973 gcc_unreachable ();
1974 if (! ira_reg_classes_intersect_p[cover_class][ALLOCNO_COVER_CLASS
1975 (another_a)]
1976 || ! ALLOCNO_ASSIGNED_P (another_a)
1977 || (hard_regno = ALLOCNO_HARD_REGNO (another_a)) < 0)
1978 continue;
1979 rclass = REGNO_REG_CLASS (hard_regno);
1980 i = ira_class_hard_reg_index[cover_class][hard_regno];
1981 if (i < 0)
1982 continue;
1983 cost = (cp->first == a
1984 ? ira_get_register_move_cost (mode, rclass, cover_class)
1985 : ira_get_register_move_cost (mode, cover_class, rclass));
1986 ira_allocate_and_set_or_copy_costs
1987 (&ALLOCNO_UPDATED_HARD_REG_COSTS (a),
1988 cover_class, ALLOCNO_COVER_CLASS_COST (a),
1989 ALLOCNO_HARD_REG_COSTS (a));
1990 ira_allocate_and_set_or_copy_costs
1991 (&ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1992 cover_class, 0, ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
1993 ALLOCNO_UPDATED_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
1994 ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a)[i] -= cp->freq * cost;
1998 /* Try to assign hard registers to the unassigned allocnos and
1999 allocnos conflicting with them or conflicting with allocnos whose
2000 regno >= START_REGNO. The function is called after ira_flattening,
2001 so more allocnos (including ones created in ira-emit.c) will have a
2002 chance to get a hard register. We use a simple assignment
2003 algorithm based on priorities. */
2004 void
2005 ira_reassign_conflict_allocnos (int start_regno)
2007 int i, allocnos_to_color_num;
2008 ira_allocno_t a;
2009 enum reg_class cover_class;
2010 bitmap allocnos_to_color;
2011 ira_allocno_iterator ai;
2013 allocnos_to_color = ira_allocate_bitmap ();
2014 allocnos_to_color_num = 0;
2015 FOR_EACH_ALLOCNO (a, ai)
2017 int n = ALLOCNO_NUM_OBJECTS (a);
2019 if (! ALLOCNO_ASSIGNED_P (a)
2020 && ! bitmap_bit_p (allocnos_to_color, ALLOCNO_NUM (a)))
2022 if (ALLOCNO_COVER_CLASS (a) != NO_REGS)
2023 sorted_allocnos[allocnos_to_color_num++] = a;
2024 else
2026 ALLOCNO_ASSIGNED_P (a) = true;
2027 ALLOCNO_HARD_REGNO (a) = -1;
2028 ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2029 ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2031 bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (a));
2033 if (ALLOCNO_REGNO (a) < start_regno
2034 || (cover_class = ALLOCNO_COVER_CLASS (a)) == NO_REGS)
2035 continue;
2036 for (i = 0; i < n; i++)
2038 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2039 ira_object_t conflict_obj;
2040 ira_object_conflict_iterator oci;
2041 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2043 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2044 ira_assert (ira_reg_classes_intersect_p
2045 [cover_class][ALLOCNO_COVER_CLASS (conflict_a)]);
2046 if (!bitmap_set_bit (allocnos_to_color, ALLOCNO_NUM (conflict_a)))
2047 continue;
2048 sorted_allocnos[allocnos_to_color_num++] = conflict_a;
2052 ira_free_bitmap (allocnos_to_color);
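/* Color the collected allocnos in priority order.  Clearing the
   assigned flag first lets update_curr_costs bias each allocno
   toward hard registers already given to its copy partners.  */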
2053 if (allocnos_to_color_num > 1)
2055 setup_allocno_priorities (sorted_allocnos, allocnos_to_color_num);
2056 qsort (sorted_allocnos, allocnos_to_color_num, sizeof (ira_allocno_t),
2057 allocno_priority_compare_func);
2059 for (i = 0; i < allocnos_to_color_num; i++)
2061 a = sorted_allocnos[i];
2062 ALLOCNO_ASSIGNED_P (a) = false;
2063 update_curr_costs (a);
2065 for (i = 0; i < allocnos_to_color_num; i++)
2067 a = sorted_allocnos[i];
2068 if (assign_hard_reg (a, true))
2070 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2071 fprintf
2072 (ira_dump_file,
2073 " Secondary allocation: assign hard reg %d to reg %d\n",
2074 ALLOCNO_HARD_REGNO (a), ALLOCNO_REGNO (a));
2081 /* This page contains code to coalesce memory stack slots used by
2082 spilled allocnos. This results in a smaller stack frame, better
2083 data locality, and smaller code for some architectures like
2084 x86/x86_64 where insn size depends on the address displacement
2085 value. On the other hand, it can worsen insn scheduling after the
2086 RA, but in practice that matters less than smaller stack frames. */
2088 /* TRUE if we coalesced some allocnos. In other words, if we got
2089 loops formed by members first_coalesced_allocno and
2090 next_coalesced_allocno containing more than one allocno. */
2091 static bool allocno_coalesced_p;
2093 /* Bitmap used to prevent repeated allocno processing because of
2094 coalescing. */
2095 static bitmap processed_coalesced_allocno_bitmap;
2097 /* The function is used to sort copies according to their execution
2098 frequencies. */
2099 static int
2100 copy_freq_compare_func (const void *v1p, const void *v2p)
2102 ira_copy_t cp1 = *(const ira_copy_t *) v1p, cp2 = *(const ira_copy_t *) v2p;
2103 int pri1, pri2;
2105 pri1 = cp1->freq;
2106 pri2 = cp2->freq;
2107 if (pri2 - pri1)
2108 return pri2 - pri1;
2110 /* If frequencies are equal, sort by copy numbers, so that the
2111 results of qsort leave nothing to chance. */
2112 return cp1->num - cp2->num;
2115 /* Merge two sets of coalesced allocnos represented correspondingly
2116 by allocnos A1 and A2 (more precisely, merging the A2 set into
2117 the A1 set). */
2118 static void
2119 merge_allocnos (ira_allocno_t a1, ira_allocno_t a2)
2121 ira_allocno_t a, first, last, next;
2123 first = ALLOCNO_FIRST_COALESCED_ALLOCNO (a1);
2124 if (first == ALLOCNO_FIRST_COALESCED_ALLOCNO (a2))
2125 return;
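/* Coalesced sets form circular lists linked through
   ALLOCNO_NEXT_COALESCED_ALLOCNO, with ALLOCNO_FIRST_COALESCED_ALLOCNO
   as the representative: retarget every member of the A2 ring to
   FIRST and splice the two rings together.  */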
2126 for (last = a2, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
2127 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2129 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = first;
2130 if (a == a2)
2131 break;
2132 last = a;
2134 next = ALLOCNO_NEXT_COALESCED_ALLOCNO (first);
2135 ALLOCNO_NEXT_COALESCED_ALLOCNO (first) = a2;
2136 ALLOCNO_NEXT_COALESCED_ALLOCNO (last) = next;
2139 /* Given two sets of coalesced allocnos, represented by A1 and A2,
2140 this function determines if any conflicts exist between the two
2141 sets. We use live ranges to find conflicts because conflicts are
2142 represented only for allocnos of the same cover class, and during
2143 the reload pass we coalesce allocnos for sharing stack memory
2144 slots. */
2145 static bool
2146 coalesced_allocno_conflict_p (ira_allocno_t a1, ira_allocno_t a2)
2148 ira_allocno_t a, conflict_allocno;
2150 bitmap_clear (processed_coalesced_allocno_bitmap);
2151 if (allocno_coalesced_p)
2153 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
2154 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2156 bitmap_set_bit (processed_coalesced_allocno_bitmap,
2157 OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (a, 0)));
2158 if (a == a1)
2159 break;
2162 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a2);;
2163 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2165 for (conflict_allocno = ALLOCNO_NEXT_COALESCED_ALLOCNO (a1);;
2166 conflict_allocno
2167 = ALLOCNO_NEXT_COALESCED_ALLOCNO (conflict_allocno))
2169 if (allocnos_have_intersected_live_ranges_p (a, conflict_allocno))
2170 return true;
2171 if (conflict_allocno == a1)
2172 break;
2175 if (a == a2)
2176 break;
2178 return false;
2181 /* The major function for aggressive allocno coalescing. We coalesce
2182 only spilled allocnos. If some allocnos have been coalesced, we
2183 set up flag allocno_coalesced_p. */
2184 static void
2185 coalesce_allocnos (void)
2187 ira_allocno_t a;
2188 ira_copy_t cp, next_cp, *sorted_copies;
2189 unsigned int j;
2190 int i, n, cp_num, regno;
2191 bitmap_iterator bi;
2193 sorted_copies = (ira_copy_t *) ira_allocate (ira_copies_num
2194 * sizeof (ira_copy_t));
2195 cp_num = 0;
2196 /* Collect copies. */
2197 EXECUTE_IF_SET_IN_BITMAP (coloring_allocno_bitmap, 0, j, bi)
2199 a = ira_allocnos[j];
2200 regno = ALLOCNO_REGNO (a);
2201 if (! ALLOCNO_ASSIGNED_P (a) || ALLOCNO_HARD_REGNO (a) >= 0
2202 || (regno < ira_reg_equiv_len
2203 && (ira_reg_equiv_const[regno] != NULL_RTX
2204 || ira_reg_equiv_invariant_p[regno])))
2205 continue;
2206 for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2208 if (cp->first == a)
2210 next_cp = cp->next_first_allocno_copy;
2211 regno = ALLOCNO_REGNO (cp->second);
2212 /* For priority coloring we coalesce allocnos only with
2213 the same cover class, not with intersecting cover
2214 classes as would also be possible. This is done for
2215 simplicity. */
2216 if ((cp->insn != NULL || cp->constraint_p)
2217 && ALLOCNO_ASSIGNED_P (cp->second)
2218 && ALLOCNO_HARD_REGNO (cp->second) < 0
2219 && (regno >= ira_reg_equiv_len
2220 || (! ira_reg_equiv_invariant_p[regno]
2221 && ira_reg_equiv_const[regno] == NULL_RTX)))
2222 sorted_copies[cp_num++] = cp;
2224 else if (cp->second == a)
2225 next_cp = cp->next_second_allocno_copy;
2226 else
2227 gcc_unreachable ();
2230 qsort (sorted_copies, cp_num, sizeof (ira_copy_t), copy_freq_compare_func);
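/* Greedy scheme: repeatedly merge the most frequent remaining copy
   whose two sides do not conflict, then drop the copies made
   internal by the merge and rescan, since a merge can create new
   conflicts for the remaining copies.  */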
2231 /* Coalesce copies, most frequently executed ones first. */
2232 for (; cp_num != 0;)
2234 for (i = 0; i < cp_num; i++)
2236 cp = sorted_copies[i];
2237 if (! coalesced_allocno_conflict_p (cp->first, cp->second))
2239 allocno_coalesced_p = true;
2240 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2241 fprintf
2242 (ira_dump_file,
2243 " Coalescing copy %d:a%dr%d-a%dr%d (freq=%d)\n",
2244 cp->num, ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
2245 ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second),
2246 cp->freq);
2247 merge_allocnos (cp->first, cp->second);
2248 i++;
2249 break;
2252 /* Collect the rest of copies. */
2253 for (n = 0; i < cp_num; i++)
2255 cp = sorted_copies[i];
2256 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->first)
2257 != ALLOCNO_FIRST_COALESCED_ALLOCNO (cp->second))
2258 sorted_copies[n++] = cp;
2260 cp_num = n;
2262 ira_free (sorted_copies);
2265 /* Usage cost and order number of the coalesced allocno set to
2266 which a given pseudo register belongs. */
2267 static int *regno_coalesced_allocno_cost;
2268 static int *regno_coalesced_allocno_num;
2270 /* Sort pseudos according to the frequencies of the coalesced
2271 allocno sets they belong to (putting the most frequently used
2272 ones first), and then according to the set order numbers. */
2273 static int
2274 coalesced_pseudo_reg_freq_compare (const void *v1p, const void *v2p)
2276 const int regno1 = *(const int *) v1p;
2277 const int regno2 = *(const int *) v2p;
2278 int diff;
2280 if ((diff = (regno_coalesced_allocno_cost[regno2]
2281 - regno_coalesced_allocno_cost[regno1])) != 0)
2282 return diff;
2283 if ((diff = (regno_coalesced_allocno_num[regno1]
2284 - regno_coalesced_allocno_num[regno2])) != 0)
2285 return diff;
2286 return regno1 - regno2;
2289 /* Widest width in which each pseudo reg is referred to (via subreg).
2290 It is used for sorting pseudo registers. */
2291 static unsigned int *regno_max_ref_width;
2293 /* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1. */
2294 #ifdef STACK_GROWS_DOWNWARD
2295 # undef STACK_GROWS_DOWNWARD
2296 # define STACK_GROWS_DOWNWARD 1
2297 #else
2298 # define STACK_GROWS_DOWNWARD 0
2299 #endif
2301 /* Sort pseudos according to their slot numbers (putting ones with
2302 smaller numbers first, or last when the frame pointer is not
2303 needed). */
2304 static int
2305 coalesced_pseudo_reg_slot_compare (const void *v1p, const void *v2p)
2307 const int regno1 = *(const int *) v1p;
2308 const int regno2 = *(const int *) v2p;
2309 ira_allocno_t a1 = ira_regno_allocno_map[regno1];
2310 ira_allocno_t a2 = ira_regno_allocno_map[regno2];
2311 int diff, slot_num1, slot_num2;
2312 int total_size1, total_size2;
2314 if (a1 == NULL || ALLOCNO_HARD_REGNO (a1) >= 0)
2316 if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
2317 return regno1 - regno2;
2318 return 1;
2320 else if (a2 == NULL || ALLOCNO_HARD_REGNO (a2) >= 0)
2321 return -1;
2322 slot_num1 = -ALLOCNO_HARD_REGNO (a1);
2323 slot_num2 = -ALLOCNO_HARD_REGNO (a2);
2324 if ((diff = slot_num1 - slot_num2) != 0)
2325 return (frame_pointer_needed
2326 || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
2327 total_size1 = MAX (PSEUDO_REGNO_BYTES (regno1), regno_max_ref_width[regno1]);
2328 total_size2 = MAX (PSEUDO_REGNO_BYTES (regno2), regno_max_ref_width[regno2]);
2329 if ((diff = total_size2 - total_size1) != 0)
2330 return diff;
2331 return regno1 - regno2;
2334 /* Setup REGNO_COALESCED_ALLOCNO_COST and REGNO_COALESCED_ALLOCNO_NUM
2335 for coalesced allocno sets containing allocnos with their regnos
2336 given in array PSEUDO_REGNOS of length N. */
2337 static void
2338 setup_coalesced_allocno_costs_and_nums (int *pseudo_regnos, int n)
2340 int i, num, regno, cost;
2341 ira_allocno_t allocno, a;
2343 for (num = i = 0; i < n; i++)
2345 regno = pseudo_regnos[i];
2346 allocno = ira_regno_allocno_map[regno];
2347 if (allocno == NULL)
2349 regno_coalesced_allocno_cost[regno] = 0;
2350 regno_coalesced_allocno_num[regno] = ++num;
2351 continue;
2353 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
2354 continue;
2355 num++;
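/* Two walks over the circular list: the first sums the frequencies
   of all members of the set, the second records the sum and the set
   ordinal for every member's regno.  */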
2356 for (cost = 0, a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2357 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2359 cost += ALLOCNO_FREQ (a);
2360 if (a == allocno)
2361 break;
2363 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2364 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2366 regno_coalesced_allocno_num[ALLOCNO_REGNO (a)] = num;
2367 regno_coalesced_allocno_cost[ALLOCNO_REGNO (a)] = cost;
2368 if (a == allocno)
2369 break;
2374 /* Collect spilled allocnos representing coalesced allocno sets (the
2375 first coalesced allocno of each set). The collected allocnos are
2376 returned through array SPILLED_COALESCED_ALLOCNOS. The function
2377 returns the number of collected allocnos. The allocnos are given
2378 by their regnos in array PSEUDO_REGNOS of length N. */
2379 static int
2380 collect_spilled_coalesced_allocnos (int *pseudo_regnos, int n,
2381 ira_allocno_t *spilled_coalesced_allocnos)
2383 int i, num, regno;
2384 ira_allocno_t allocno;
2386 for (num = i = 0; i < n; i++)
2388 regno = pseudo_regnos[i];
2389 allocno = ira_regno_allocno_map[regno];
2390 if (allocno == NULL || ALLOCNO_HARD_REGNO (allocno) >= 0
2391 || ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno)
2392 continue;
2393 spilled_coalesced_allocnos[num++] = allocno;
2395 return num;
2398 /* Array of live ranges of size IRA_ALLOCNOS_NUM. The live range
2399 for a given slot contains the live ranges of the coalesced
2400 allocnos assigned to that slot. */
2401 static live_range_t *slot_coalesced_allocnos_live_ranges;
2403 /* Return TRUE if the coalesced allocnos represented by ALLOCNO have
2404 live ranges that intersect the live ranges of coalesced allocnos
2405 assigned to the slot with number N. */
2406 static bool
2407 slot_coalesced_allocno_live_ranges_intersect_p (ira_allocno_t allocno, int n)
2409 ira_allocno_t a;
2411 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2412 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2414 int i;
2415 int nr = ALLOCNO_NUM_OBJECTS (a);
2416 for (i = 0; i < nr; i++)
2418 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2419 if (ira_live_ranges_intersect_p (slot_coalesced_allocnos_live_ranges[n],
2420 OBJECT_LIVE_RANGES (obj)))
2421 return true;
2423 if (a == allocno)
2424 break;
2426 return false;
2429 /* Update the live ranges of the slot to which the coalesced
2430 allocnos represented by ALLOCNO were assigned. */
2431 static void
2432 setup_slot_coalesced_allocno_live_ranges (ira_allocno_t allocno)
2434 int i, n;
2435 ira_allocno_t a;
2436 live_range_t r;
2438 n = ALLOCNO_TEMP (allocno);
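/* The slot ordinal was stored in ALLOCNO_TEMP by coalesce_spill_slots
   below.  */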
2439 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2440 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2442 int nr = ALLOCNO_NUM_OBJECTS (a);
2443 for (i = 0; i < nr; i++)
2445 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2446 r = ira_copy_live_range_list (OBJECT_LIVE_RANGES (obj));
2447 slot_coalesced_allocnos_live_ranges[n]
2448 = ira_merge_live_ranges
2449 (slot_coalesced_allocnos_live_ranges[n], r);
2451 if (a == allocno)
2452 break;
2456 /* We have coalesced allocnos involved in copies. Coalesce allocnos
2457 further in order to share the same memory stack slot. Allocnos
2458 representing sets of allocnos coalesced before the call are given
2459 in array SPILLED_COALESCED_ALLOCNOS of length NUM. Return TRUE if
2460 some allocnos were coalesced in the function. */
2461 static bool
2462 coalesce_spill_slots (ira_allocno_t *spilled_coalesced_allocnos, int num)
2464 int i, j, n, last_coalesced_allocno_num;
2465 ira_allocno_t allocno, a;
2466 bool merged_p = false;
2467 bitmap set_jump_crosses = regstat_get_setjmp_crosses ();
2469 slot_coalesced_allocnos_live_ranges
2470 = (live_range_t *) ira_allocate (sizeof (live_range_t) * ira_allocnos_num);
2471 memset (slot_coalesced_allocnos_live_ranges, 0,
2472 sizeof (live_range_t) * ira_allocnos_num);
2473 last_coalesced_allocno_num = 0;
2474 /* Coalesce non-conflicting spilled allocnos preferring most
2475 frequently used. */
2476 for (i = 0; i < num; i++)
2478 allocno = spilled_coalesced_allocnos[i];
2479 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
2480 || bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (allocno))
2481 || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
2482 && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
2483 || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
2484 continue;
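/* Search the slot representatives placed so far (they precede I and
   are used more frequently because of the sorting) for one whose
   accumulated live ranges do not intersect those of ALLOCNO's set.  */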
2485 for (j = 0; j < i; j++)
2487 a = spilled_coalesced_allocnos[j];
2488 n = ALLOCNO_TEMP (a);
2489 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a
2490 && ! bitmap_bit_p (set_jump_crosses, ALLOCNO_REGNO (a))
2491 && (ALLOCNO_REGNO (a) >= ira_reg_equiv_len
2492 || (! ira_reg_equiv_invariant_p[ALLOCNO_REGNO (a)]
2493 && ira_reg_equiv_const[ALLOCNO_REGNO (a)] == NULL_RTX))
2494 && ! slot_coalesced_allocno_live_ranges_intersect_p (allocno, n))
2495 break;
2497 if (j >= i)
2499 /* No coalescing: set up a new slot number for the coalesced
2500 allocnos represented by ALLOCNO. */
2501 ALLOCNO_TEMP (allocno) = last_coalesced_allocno_num++;
2502 setup_slot_coalesced_allocno_live_ranges (allocno);
2504 else
2506 allocno_coalesced_p = true;
2507 merged_p = true;
2508 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2509 fprintf (ira_dump_file,
2510 " Coalescing spilled allocnos a%dr%d->a%dr%d\n",
2511 ALLOCNO_NUM (allocno), ALLOCNO_REGNO (allocno),
2512 ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
2513 ALLOCNO_TEMP (allocno) = ALLOCNO_TEMP (a);
2514 setup_slot_coalesced_allocno_live_ranges (allocno);
2515 merge_allocnos (a, allocno);
2516 ira_assert (ALLOCNO_FIRST_COALESCED_ALLOCNO (a) == a);
2519 for (i = 0; i < ira_allocnos_num; i++)
2520 ira_finish_live_range_list (slot_coalesced_allocnos_live_ranges[i]);
2521 ira_free (slot_coalesced_allocnos_live_ranges);
2522 return merged_p;
2525 /* Sort pseudo-register numbers in array PSEUDO_REGNOS of length N
2526 for the subsequent assignment of stack slots to them in the reload
2527 pass. To do this we coalesce spilled allocnos first to decrease
2528 the number of memory-memory move insns. This function is called
2529 by reload. */
2530 void
2531 ira_sort_regnos_for_alter_reg (int *pseudo_regnos, int n,
2532 unsigned int *reg_max_ref_width)
2534 int max_regno = max_reg_num ();
2535 int i, regno, num, slot_num;
2536 ira_allocno_t allocno, a;
2537 ira_allocno_iterator ai;
2538 ira_allocno_t *spilled_coalesced_allocnos;
2540 /* Set up the allocnos which can be coalesced. */
2541 coloring_allocno_bitmap = ira_allocate_bitmap ();
2542 for (i = 0; i < n; i++)
2544 regno = pseudo_regnos[i];
2545 allocno = ira_regno_allocno_map[regno];
2546 if (allocno != NULL)
2547 bitmap_set_bit (coloring_allocno_bitmap,
2548 ALLOCNO_NUM (allocno));
2550 allocno_coalesced_p = false;
2551 processed_coalesced_allocno_bitmap = ira_allocate_bitmap ();
2552 coalesce_allocnos ();
2553 ira_free_bitmap (coloring_allocno_bitmap);
2554 regno_coalesced_allocno_cost
2555 = (int *) ira_allocate (max_regno * sizeof (int));
2556 regno_coalesced_allocno_num
2557 = (int *) ira_allocate (max_regno * sizeof (int));
2558 memset (regno_coalesced_allocno_num, 0, max_regno * sizeof (int));
2559 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
2560 /* Sort regnos according to the frequencies of the corresponding
2561 coalesced allocno sets. */
2562 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_freq_compare);
2563 spilled_coalesced_allocnos
2564 = (ira_allocno_t *) ira_allocate (ira_allocnos_num
2565 * sizeof (ira_allocno_t));
2566 /* Collect allocnos representing the spilled coalesced allocno
2567 sets. */
2568 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
2569 spilled_coalesced_allocnos);
2570 if (flag_ira_share_spill_slots
2571 && coalesce_spill_slots (spilled_coalesced_allocnos, num))
2573 setup_coalesced_allocno_costs_and_nums (pseudo_regnos, n);
2574 qsort (pseudo_regnos, n, sizeof (int),
2575 coalesced_pseudo_reg_freq_compare);
2576 num = collect_spilled_coalesced_allocnos (pseudo_regnos, n,
2577 spilled_coalesced_allocnos);
2579 ira_free_bitmap (processed_coalesced_allocno_bitmap);
2580 allocno_coalesced_p = false;
2581 /* Assign stack slot numbers to spilled allocno sets, using smaller
2582 numbers for the most frequently used coalesced allocnos. -1 is
2583 reserved for the dynamic search of stack slots for pseudos spilled
2584 by reload. */
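/* Slot numbers are stored negated and biased: the first shared slot
   is printed as slot 1 but stored as ALLOCNO_HARD_REGNO == -2
   (slot_num is incremented before being negated below), keeping -1
   free to mean a spilled allocno without an IRA-assigned slot.  */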
2585 slot_num = 1;
2586 for (i = 0; i < num; i++)
2588 allocno = spilled_coalesced_allocnos[i];
2589 if (ALLOCNO_FIRST_COALESCED_ALLOCNO (allocno) != allocno
2590 || ALLOCNO_HARD_REGNO (allocno) >= 0
2591 || (ALLOCNO_REGNO (allocno) < ira_reg_equiv_len
2592 && (ira_reg_equiv_const[ALLOCNO_REGNO (allocno)] != NULL_RTX
2593 || ira_reg_equiv_invariant_p[ALLOCNO_REGNO (allocno)])))
2594 continue;
2595 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2596 fprintf (ira_dump_file, " Slot %d (freq,size):", slot_num);
2597 slot_num++;
2598 for (a = ALLOCNO_NEXT_COALESCED_ALLOCNO (allocno);;
2599 a = ALLOCNO_NEXT_COALESCED_ALLOCNO (a))
2601 ira_assert (ALLOCNO_HARD_REGNO (a) < 0);
2602 ALLOCNO_HARD_REGNO (a) = -slot_num;
2603 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2604 fprintf (ira_dump_file, " a%dr%d(%d,%d)",
2605 ALLOCNO_NUM (a), ALLOCNO_REGNO (a), ALLOCNO_FREQ (a),
2606 MAX (PSEUDO_REGNO_BYTES (ALLOCNO_REGNO (a)),
2607 reg_max_ref_width[ALLOCNO_REGNO (a)]));
2609 if (a == allocno)
2610 break;
2612 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2613 fprintf (ira_dump_file, "\n");
2615 ira_spilled_reg_stack_slots_num = slot_num - 1;
2616 ira_free (spilled_coalesced_allocnos);
2617 /* Sort regnos according to their slot numbers. */
2618 regno_max_ref_width = reg_max_ref_width;
2619 qsort (pseudo_regnos, n, sizeof (int), coalesced_pseudo_reg_slot_compare);
2620 /* Uncoalesce allocnos, which is necessary for (re)assigning them
2621 during the reload pass. */
2622 FOR_EACH_ALLOCNO (a, ai)
2624 ALLOCNO_FIRST_COALESCED_ALLOCNO (a) = a;
2625 ALLOCNO_NEXT_COALESCED_ALLOCNO (a) = a;
2627 ira_free (regno_coalesced_allocno_num);
2628 ira_free (regno_coalesced_allocno_cost);
2633 /* This page contains code used by the reload pass to improve the
2634 final code. */
2636 /* The function is called from reload to mark changes in the
2637 allocation of REGNO made by the reload. Remember that reg_renumber
2638 reflects the change result. */
2639 void
2640 ira_mark_allocation_change (int regno)
2642 ira_allocno_t a = ira_regno_allocno_map[regno];
2643 int old_hard_regno, hard_regno, cost;
2644 enum reg_class cover_class = ALLOCNO_COVER_CLASS (a);
2646 ira_assert (a != NULL);
2647 hard_regno = reg_renumber[regno];
2648 if ((old_hard_regno = ALLOCNO_HARD_REGNO (a)) == hard_regno)
2649 return;
2650 if (old_hard_regno < 0)
2651 cost = -ALLOCNO_MEMORY_COST (a);
2652 else
2654 ira_assert (ira_class_hard_reg_index[cover_class][old_hard_regno] >= 0);
2655 cost = -(ALLOCNO_HARD_REG_COSTS (a) == NULL
2656 ? ALLOCNO_COVER_CLASS_COST (a)
2657 : ALLOCNO_HARD_REG_COSTS (a)
2658 [ira_class_hard_reg_index[cover_class][old_hard_regno]]);
2659 update_copy_costs (a, false);
2661 ira_overall_cost -= cost;
2662 ALLOCNO_HARD_REGNO (a) = hard_regno;
2663 if (hard_regno < 0)
2665 ALLOCNO_HARD_REGNO (a) = -1;
2666 cost += ALLOCNO_MEMORY_COST (a);
2668 else if (ira_class_hard_reg_index[cover_class][hard_regno] >= 0)
2670 cost += (ALLOCNO_HARD_REG_COSTS (a) == NULL
2671 ? ALLOCNO_COVER_CLASS_COST (a)
2672 : ALLOCNO_HARD_REG_COSTS (a)
2673 [ira_class_hard_reg_index[cover_class][hard_regno]]);
2674 update_copy_costs (a, true);
2676 else
2677 /* Reload changed class of the allocno. */
2678 cost = 0;
2679 ira_overall_cost += cost;
2682 /* This function is called when reload deletes a memory-memory move.
2683 In this case we mark that the allocation of the corresponding
2684 allocnos should not be changed in the future. Otherwise we risk
2685 generating wrong code. */
2686 void
2687 ira_mark_memory_move_deletion (int dst_regno, int src_regno)
2689 ira_allocno_t dst = ira_regno_allocno_map[dst_regno];
2690 ira_allocno_t src = ira_regno_allocno_map[src_regno];
2692 ira_assert (dst != NULL && src != NULL
2693 && ALLOCNO_HARD_REGNO (dst) < 0
2694 && ALLOCNO_HARD_REGNO (src) < 0);
2695 ALLOCNO_DONT_REASSIGN_P (dst) = true;
2696 ALLOCNO_DONT_REASSIGN_P (src) = true;
2699 /* Try to assign a hard register (except for FORBIDDEN_REGS) to
2700 allocno A and return TRUE in the case of success. */
2701 static bool
2702 allocno_reload_assign (ira_allocno_t a, HARD_REG_SET forbidden_regs)
2704 int hard_regno;
2705 enum reg_class cover_class;
2706 int regno = ALLOCNO_REGNO (a);
2707 HARD_REG_SET saved[2];
2708 int i, n;
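/* Temporarily widen each object's total conflict set with
   FORBIDDEN_REGS (and with the call-clobbered registers when caller
   saves are disabled and A crosses calls); the saved sets are
   restored before returning.  */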
2710 n = ALLOCNO_NUM_OBJECTS (a);
2711 for (i = 0; i < n; i++)
2713 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2714 COPY_HARD_REG_SET (saved[i], OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
2715 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), forbidden_regs);
2716 if (! flag_caller_saves && ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
2717 IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2718 call_used_reg_set);
2720 ALLOCNO_ASSIGNED_P (a) = false;
2721 cover_class = ALLOCNO_COVER_CLASS (a);
2722 update_curr_costs (a);
2723 assign_hard_reg (a, true);
2724 hard_regno = ALLOCNO_HARD_REGNO (a);
2725 reg_renumber[regno] = hard_regno;
2726 if (hard_regno < 0)
2727 ALLOCNO_HARD_REGNO (a) = -1;
2728 else
2730 ira_assert (ira_class_hard_reg_index[cover_class][hard_regno] >= 0);
2731 ira_overall_cost -= (ALLOCNO_MEMORY_COST (a)
2732 - (ALLOCNO_HARD_REG_COSTS (a) == NULL
2733 ? ALLOCNO_COVER_CLASS_COST (a)
2734 : ALLOCNO_HARD_REG_COSTS (a)
2735 [ira_class_hard_reg_index
2736 [cover_class][hard_regno]]));
2737 if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
2738 && ! ira_hard_reg_not_in_set_p (hard_regno, ALLOCNO_MODE (a),
2739 call_used_reg_set))
2741 ira_assert (flag_caller_saves);
2742 caller_save_needed = 1;
2746 /* If we found a hard register, modify the RTL for the pseudo
2747 register to show the hard register, and mark the pseudo register
2748 live. */
2749 if (reg_renumber[regno] >= 0)
2751 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2752 fprintf (ira_dump_file, ": reassign to %d\n", reg_renumber[regno]);
2753 SET_REGNO (regno_reg_rtx[regno], reg_renumber[regno]);
2754 mark_home_live (regno);
2756 else if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2757 fprintf (ira_dump_file, "\n");
2758 for (i = 0; i < n; i++)
2760 ira_object_t obj = ALLOCNO_OBJECT (a, i);
2761 COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), saved[i]);
2763 return reg_renumber[regno] >= 0;
2766 /* Sort pseudos according to their usage frequencies (putting the
2767 most frequently used ones first). */
2768 static int
2769 pseudo_reg_compare (const void *v1p, const void *v2p)
2771 int regno1 = *(const int *) v1p;
2772 int regno2 = *(const int *) v2p;
2773 int diff;
2775 if ((diff = REG_FREQ (regno2) - REG_FREQ (regno1)) != 0)
2776 return diff;
2777 return regno1 - regno2;
2780 /* Try to allocate hard registers to SPILLED_PSEUDO_REGS (there are
2781 NUM of them) or spilled pseudos conflicting with pseudos in
2782 SPILLED_PSEUDO_REGS. Return TRUE and update SPILLED, if the
2783 allocation has been changed. The function doesn't use
2784 BAD_SPILL_REGS and hard registers in PSEUDO_FORBIDDEN_REGS and
2785 PSEUDO_PREVIOUS_REGS for the corresponding pseudos. The function
2786 is called by the reload pass at the end of each reload
2787 iteration. */
2788 bool
2789 ira_reassign_pseudos (int *spilled_pseudo_regs, int num,
2790 HARD_REG_SET bad_spill_regs,
2791 HARD_REG_SET *pseudo_forbidden_regs,
2792 HARD_REG_SET *pseudo_previous_regs,
2793 bitmap spilled)
2795 int i, n, regno;
2796 bool changed_p;
2797 ira_allocno_t a;
2798 HARD_REG_SET forbidden_regs;
2799 bitmap temp = BITMAP_ALLOC (NULL);
2801 /* Add pseudos which conflict with pseudos already in
2802 SPILLED_PSEUDO_REGS to SPILLED_PSEUDO_REGS. This is preferable
2803 to allocating in two steps as some of the conflicts might have
2804 a higher priority than the pseudos passed in SPILLED_PSEUDO_REGS. */
2805 for (i = 0; i < num; i++)
2806 bitmap_set_bit (temp, spilled_pseudo_regs[i]);
2808 for (i = 0, n = num; i < n; i++)
2810 int nr, j;
2811 int regno = spilled_pseudo_regs[i];
2812 bitmap_set_bit (temp, regno);
2814 a = ira_regno_allocno_map[regno];
2815 nr = ALLOCNO_NUM_OBJECTS (a);
2816 for (j = 0; j < nr; j++)
2818 ira_object_t conflict_obj;
2819 ira_object_t obj = ALLOCNO_OBJECT (a, j);
2820 ira_object_conflict_iterator oci;
2822 FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2824 ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2825 if (ALLOCNO_HARD_REGNO (conflict_a) < 0
2826 && ! ALLOCNO_DONT_REASSIGN_P (conflict_a)
2827 && bitmap_set_bit (temp, ALLOCNO_REGNO (conflict_a)))
2829 spilled_pseudo_regs[num++] = ALLOCNO_REGNO (conflict_a);
2830 /* ?!? This seems wrong. */
2831 bitmap_set_bit (consideration_allocno_bitmap,
2832 ALLOCNO_NUM (conflict_a));
2838 if (num > 1)
2839 qsort (spilled_pseudo_regs, num, sizeof (int), pseudo_reg_compare);
2840 changed_p = false;
2841 /* Try to assign hard registers to pseudos from
2842 SPILLED_PSEUDO_REGS. */
2843 for (i = 0; i < num; i++)
2845 regno = spilled_pseudo_regs[i];
2846 COPY_HARD_REG_SET (forbidden_regs, bad_spill_regs);
2847 IOR_HARD_REG_SET (forbidden_regs, pseudo_forbidden_regs[regno]);
2848 IOR_HARD_REG_SET (forbidden_regs, pseudo_previous_regs[regno]);
2849 gcc_assert (reg_renumber[regno] < 0);
2850 a = ira_regno_allocno_map[regno];
2851 ira_mark_allocation_change (regno);
2852 ira_assert (reg_renumber[regno] < 0);
2853 if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2854 fprintf (ira_dump_file,
2855 " Try Assign %d(a%d), cost=%d", regno, ALLOCNO_NUM (a),
2856 ALLOCNO_MEMORY_COST (a)
2857 - ALLOCNO_COVER_CLASS_COST (a));
2858 allocno_reload_assign (a, forbidden_regs);
2859 if (reg_renumber[regno] >= 0)
2861 CLEAR_REGNO_REG_SET (spilled, regno);
2862 changed_p = true;
2865 BITMAP_FREE (temp);
2866 return changed_p;
2869 /* The function is called by reload and returns an already allocated
2870 stack slot (if any) for REGNO with the given INHERENT_SIZE and
2871 TOTAL_SIZE. If no slot which can be used for REGNO is found, the
2872 function returns NULL. */
2873 rtx
2874 ira_reuse_stack_slot (int regno, unsigned int inherent_size,
2875 unsigned int total_size)
2877 unsigned int i;
2878 int slot_num, best_slot_num;
2879 int cost, best_cost;
2880 ira_copy_t cp, next_cp;
2881 ira_allocno_t another_allocno, allocno = ira_regno_allocno_map[regno];
2882 rtx x;
2883 bitmap_iterator bi;
2884 struct ira_spilled_reg_stack_slot *slot = NULL;
2886 ira_assert (inherent_size == PSEUDO_REGNO_BYTES (regno)
2887 && inherent_size <= total_size
2888 && ALLOCNO_HARD_REGNO (allocno) < 0);
2889 if (! flag_ira_share_spill_slots)
2890 return NULL_RTX;
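/* ALLOCNO_HARD_REGNO encodes an IRA-assigned slot as -slot_num - 2,
   so -1 here means the pseudo was spilled by reload itself and an
   existing slot should be searched for.  */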
2891 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
2892 if (slot_num != -1)
2894 slot = &ira_spilled_reg_stack_slots[slot_num];
2895 x = slot->mem;
2897 else
2899 best_cost = best_slot_num = -1;
2900 x = NULL_RTX;
2901 /* This means that the pseudo was spilled by the reload pass; try
2902 to reuse a slot. */
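/* A slot is acceptable if it is wide enough and none of its current
   occupants' live ranges intersect ALLOCNO's; among the acceptable
   slots prefer the one connected to ALLOCNO by the most frequently
   executed copies.  */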
2903 for (slot_num = 0;
2904 slot_num < ira_spilled_reg_stack_slots_num;
2905 slot_num++)
2907 slot = &ira_spilled_reg_stack_slots[slot_num];
2908 if (slot->mem == NULL_RTX)
2909 continue;
2910 if (slot->width < total_size
2911 || GET_MODE_SIZE (GET_MODE (slot->mem)) < inherent_size)
2912 continue;
2914 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
2915 FIRST_PSEUDO_REGISTER, i, bi)
2917 another_allocno = ira_regno_allocno_map[i];
2918 if (allocnos_have_intersected_live_ranges_p (allocno,
2919 another_allocno))
2920 goto cont;
2922 for (cost = 0, cp = ALLOCNO_COPIES (allocno);
2923 cp != NULL;
2924 cp = next_cp)
2926 if (cp->first == allocno)
2928 next_cp = cp->next_first_allocno_copy;
2929 another_allocno = cp->second;
2931 else if (cp->second == allocno)
2933 next_cp = cp->next_second_allocno_copy;
2934 another_allocno = cp->first;
2936 else
2937 gcc_unreachable ();
2938 if (cp->insn == NULL_RTX)
2939 continue;
2940 if (bitmap_bit_p (&slot->spilled_regs,
2941 ALLOCNO_REGNO (another_allocno)))
2942 cost += cp->freq;
2944 if (cost > best_cost)
2946 best_cost = cost;
2947 best_slot_num = slot_num;
2949 cont:
2952 if (best_cost >= 0)
2954 slot_num = best_slot_num;
2955 slot = &ira_spilled_reg_stack_slots[slot_num];
2956 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
2957 x = slot->mem;
2958 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
2961 if (x != NULL_RTX)
2963 ira_assert (slot->width >= total_size);
2964 #ifdef ENABLE_IRA_CHECKING
2965 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
2966 FIRST_PSEUDO_REGISTER, i, bi)
2968 ira_assert (! pseudos_have_intersected_live_ranges_p (regno, i));
2970 #endif
2971 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
2972 if (internal_flag_ira_verbose > 3 && ira_dump_file)
2974 fprintf (ira_dump_file, " Assigning %d(freq=%d) slot %d of",
2975 regno, REG_FREQ (regno), slot_num);
2976 EXECUTE_IF_SET_IN_BITMAP (&slot->spilled_regs,
2977 FIRST_PSEUDO_REGISTER, i, bi)
2979 if ((unsigned) regno != i)
2980 fprintf (ira_dump_file, " %d", i);
2982 fprintf (ira_dump_file, "\n");
2985 return x;
2988 /* This is called by reload every time a new stack slot X with
2989 TOTAL_SIZE is allocated for REGNO. We store this info for
2990 subsequent ira_reuse_stack_slot calls. */
2991 void
2992 ira_mark_new_stack_slot (rtx x, int regno, unsigned int total_size)
2994 struct ira_spilled_reg_stack_slot *slot;
2995 int slot_num;
2996 ira_allocno_t allocno;
2998 ira_assert (PSEUDO_REGNO_BYTES (regno) <= total_size);
2999 allocno = ira_regno_allocno_map[regno];
3000 slot_num = -ALLOCNO_HARD_REGNO (allocno) - 2;
3001 if (slot_num == -1)
3003 slot_num = ira_spilled_reg_stack_slots_num++;
3004 ALLOCNO_HARD_REGNO (allocno) = -slot_num - 2;
3006 slot = &ira_spilled_reg_stack_slots[slot_num];
3007 INIT_REG_SET (&slot->spilled_regs);
3008 SET_REGNO_REG_SET (&slot->spilled_regs, regno);
3009 slot->mem = x;
3010 slot->width = total_size;
3011 if (internal_flag_ira_verbose > 3 && ira_dump_file)
3012 fprintf (ira_dump_file, " Assigning %d(freq=%d) a new slot %d\n",
3013 regno, REG_FREQ (regno), slot_num);
3017 /* Return spill cost for pseudo-registers whose numbers are in array
3018 REGNOS (with a negative number as an end marker) for reload with
3019 given IN and OUT for INSN. Also return the number of points
3020 (through EXCESS_PRESSURE_LIVE_LENGTH) where the pseudo-registers
3021 live and the register pressure is high, the number of references
3022 of the pseudo-registers (through NREFS), the number of
3023 callee-clobbered hard-registers occupied by the pseudo-registers
3024 (through CALL_USED_COUNT), and the first hard regno occupied by
3025 the pseudo-registers (through FIRST_HARD_REGNO). */
3026 static int
3027 calculate_spill_cost (int *regnos, rtx in, rtx out, rtx insn,
3028 int *excess_pressure_live_length,
3029 int *nrefs, int *call_used_count, int *first_hard_regno)
3031 int i, cost, regno, hard_regno, j, count, saved_cost, nregs;
3032 bool in_p, out_p;
3033 int length;
3034 ira_allocno_t a;
3036 *nrefs = 0;
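/* Accumulate over all pseudos in REGNOS: the length of high-pressure
   regions they live in, the memory-versus-register cost difference,
   and how many of them occupy only call-clobbered hard registers.
   A reload saving is subtracted when the reloaded value dies in
   INSN.  */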
3037 for (length = count = cost = i = 0;; i++)
3039 regno = regnos[i];
3040 if (regno < 0)
3041 break;
3042 *nrefs += REG_N_REFS (regno);
3043 hard_regno = reg_renumber[regno];
3044 ira_assert (hard_regno >= 0);
3045 a = ira_regno_allocno_map[regno];
3046 length += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) / ALLOCNO_NUM_OBJECTS (a);
3047 cost += ALLOCNO_MEMORY_COST (a) - ALLOCNO_COVER_CLASS_COST (a);
3048 nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
3049 for (j = 0; j < nregs; j++)
3050 if (! TEST_HARD_REG_BIT (call_used_reg_set, hard_regno + j))
3051 break;
3052 if (j == nregs)
3053 count++;
3054 in_p = in && REG_P (in) && (int) REGNO (in) == hard_regno;
3055 out_p = out && REG_P (out) && (int) REGNO (out) == hard_regno;
3056 if ((in_p || out_p)
3057 && find_regno_note (insn, REG_DEAD, hard_regno) != NULL_RTX)
3059 saved_cost = 0;
3060 if (in_p)
3061 saved_cost += ira_memory_move_cost
3062 [ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][1];
3063 if (out_p)
3064 saved_cost
3065 += ira_memory_move_cost
3066 [ALLOCNO_MODE (a)][ALLOCNO_COVER_CLASS (a)][0];
3067 cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)) * saved_cost;
3070 *excess_pressure_live_length = length;
3071 *call_used_count = count;
3072 hard_regno = -1;
3073 if (regnos[0] >= 0)
3075 hard_regno = reg_renumber[regnos[0]];
3077 *first_hard_regno = hard_regno;
3078 return cost;
3081 /* Return TRUE if spilling pseudo-registers whose numbers are in
3082 array REGNOS is better than spilling pseudo-registers with numbers
3083 in OTHER_REGNOS for reload with given IN and OUT for INSN. The
3084 function is used by the reload pass to make better register
3085 spilling decisions. */
3086 bool
3087 ira_better_spill_reload_regno_p (int *regnos, int *other_regnos,
3088 rtx in, rtx out, rtx insn)
3090 int cost, other_cost;
3091 int length, other_length;
3092 int nrefs, other_nrefs;
3093 int call_used_count, other_call_used_count;
3094 int hard_regno, other_hard_regno;
3096 cost = calculate_spill_cost (regnos, in, out, insn,
3097 &length, &nrefs, &call_used_count, &hard_regno);
3098 other_cost = calculate_spill_cost (other_regnos, in, out, insn,
3099 &other_length, &other_nrefs,
3100 &other_call_used_count,
3101 &other_hard_regno);
3102 if (nrefs == 0 && other_nrefs != 0)
3103 return true;
3104 if (nrefs != 0 && other_nrefs == 0)
3105 return false;
3106 if (cost != other_cost)
3107 return cost < other_cost;
3108 if (length != other_length)
3109 return length > other_length;
3110 #ifdef REG_ALLOC_ORDER
3111 if (hard_regno >= 0 && other_hard_regno >= 0)
3112 return (inv_reg_alloc_order[hard_regno]
3113 < inv_reg_alloc_order[other_hard_regno]);
3114 #else
3115 if (call_used_count != other_call_used_count)
3116 return call_used_count > other_call_used_count;
3117 #endif
3118 return false;
3123 /* Allocate and initialize data necessary for assign_hard_reg. */
3124 void
3125 ira_initiate_assign (void)
3127 sorted_allocnos
3128 = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
3129 * ira_allocnos_num);
3130 consideration_allocno_bitmap = ira_allocate_bitmap ();
3131 initiate_cost_update ();
3132 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
3135 /* Deallocate data used by assign_hard_reg. */
3136 void
3137 ira_finish_assign (void)
3139 ira_free (sorted_allocnos);
3140 ira_free_bitmap (consideration_allocno_bitmap);
3141 finish_cost_update ();
3142 ira_free (allocno_priorities);
3147 /* Entry function doing color-based register allocation. */
3148 static void
3149 color (void)
3151 allocno_stack_vec = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
3152 removed_splay_allocno_vec
3153 = VEC_alloc (ira_allocno_t, heap, ira_allocnos_num);
3154 memset (allocated_hardreg_p, 0, sizeof (allocated_hardreg_p));
3155 ira_initiate_assign ();
3156 do_coloring ();
3157 ira_finish_assign ();
3158 VEC_free (ira_allocno_t, heap, removed_splay_allocno_vec);
3159 VEC_free (ira_allocno_t, heap, allocno_stack_vec);
3160 move_spill_restore ();
3165 /* This page contains a simple register allocator that does not use
3166 allocno conflicts. It is used for fast allocation at -O0. */
3168 /* Do register allocation without using allocno conflicts. It uses
3169 only allocno live ranges. The algorithm is close to Chow's
3170 priority coloring. */
3171 static void
3172 fast_allocation (void)
3174 int i, j, k, num, class_size, hard_regno;
3175 #ifdef STACK_REGS
3176 bool no_stack_reg_p;
3177 #endif
3178 enum reg_class cover_class;
3179 enum machine_mode mode;
3180 ira_allocno_t a;
3181 ira_allocno_iterator ai;
3182 live_range_t r;
3183 HARD_REG_SET conflict_hard_regs, *used_hard_regs;
3185 sorted_allocnos = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
3186 * ira_allocnos_num);
3187 num = 0;
3188 FOR_EACH_ALLOCNO (a, ai)
3189 sorted_allocnos[num++] = a;
3190 allocno_priorities = (int *) ira_allocate (sizeof (int) * ira_allocnos_num);
3191 setup_allocno_priorities (sorted_allocnos, num);
3192 used_hard_regs = (HARD_REG_SET *) ira_allocate (sizeof (HARD_REG_SET)
3193 * ira_max_point);
3194 for (i = 0; i < ira_max_point; i++)
3195 CLEAR_HARD_REG_SET (used_hard_regs[i]);
3196 qsort (sorted_allocnos, num, sizeof (ira_allocno_t),
3197 allocno_priority_compare_func);
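/* Allocate in priority order.  used_hard_regs[P] accumulates the
   hard registers taken at program point P, so an allocno may get a
   register only if it is free at every point of the allocno's live
   ranges.  */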
3198 for (i = 0; i < num; i++)
3200 int nr, l;
3202 a = sorted_allocnos[i];
3203 nr = ALLOCNO_NUM_OBJECTS (a);
3204 CLEAR_HARD_REG_SET (conflict_hard_regs);
3205 for (l = 0; l < nr; l++)
3207 ira_object_t obj = ALLOCNO_OBJECT (a, l);
3208 IOR_HARD_REG_SET (conflict_hard_regs,
3209 OBJECT_CONFLICT_HARD_REGS (obj));
3210 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3211 for (j = r->start; j <= r->finish; j++)
3212 IOR_HARD_REG_SET (conflict_hard_regs, used_hard_regs[j]);
3214 cover_class = ALLOCNO_COVER_CLASS (a);
3215 ALLOCNO_ASSIGNED_P (a) = true;
3216 ALLOCNO_HARD_REGNO (a) = -1;
3217 if (hard_reg_set_subset_p (reg_class_contents[cover_class],
3218 conflict_hard_regs))
3219 continue;
3220 mode = ALLOCNO_MODE (a);
3221 #ifdef STACK_REGS
3222 no_stack_reg_p = ALLOCNO_NO_STACK_REG_P (a);
3223 #endif
3224 class_size = ira_class_hard_regs_num[cover_class];
3225 for (j = 0; j < class_size; j++)
3227 hard_regno = ira_class_hard_regs[cover_class][j];
3228 #ifdef STACK_REGS
3229 if (no_stack_reg_p && FIRST_STACK_REG <= hard_regno
3230 && hard_regno <= LAST_STACK_REG)
3231 continue;
3232 #endif
3233 if (!ira_hard_reg_not_in_set_p (hard_regno, mode, conflict_hard_regs)
3234 || (TEST_HARD_REG_BIT
3235 (prohibited_class_mode_regs[cover_class][mode], hard_regno)))
3236 continue;
3237 ALLOCNO_HARD_REGNO (a) = hard_regno;
3238 for (l = 0; l < nr; l++)
3240 ira_object_t obj = ALLOCNO_OBJECT (a, l);
3241 for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
3242 for (k = r->start; k <= r->finish; k++)
3243 IOR_HARD_REG_SET (used_hard_regs[k],
3244 ira_reg_mode_hard_regset[hard_regno][mode]);
3246 break;
3249 ira_free (sorted_allocnos);
3250 ira_free (used_hard_regs);
3251 ira_free (allocno_priorities);
3252 if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
3253 ira_print_disposition (ira_dump_file);
3258 /* Entry function doing coloring. */
3259 void
3260 ira_color (void)
3262 ira_allocno_t a;
3263 ira_allocno_iterator ai;
3265 /* Setup updated costs. */
3266 FOR_EACH_ALLOCNO (a, ai)
3268 ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
3269 ALLOCNO_UPDATED_COVER_CLASS_COST (a) = ALLOCNO_COVER_CLASS_COST (a);
3271 if (ira_conflicts_p)
3272 color ();
3273 else
3274 fast_allocation ();