1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2004 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "function.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "basic-block.h"
31 #include "df.h"
32 #include "output.h"
33 #include "ra.h"
35 /* This file is part of the graph coloring register allocator.
36 It contains the graph colorizer. Given an interference graph
37 as set up in ra-build.c the toplevel function in this file
38 (ra_colorize_graph) colorizes the graph, leaving a list
39 of colored, coalesced and spilled nodes.
41 The algorithm used is a merge of George & Appel's iterative coalescing
42 and optimistic coalescing, switchable at runtime. The current default
43 is "optimistic coalescing +", which is based on the normal Briggs/Cooper
44 framework. We can also use biased coloring. Most of the structure
45 here follows the different papers.
47 Additionally there is a custom step to locally improve the overall
48 spill cost of the colored graph (recolor_spills). */
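/* Illustrative sketch only (not part of the original sources): the phases
   implemented below interact roughly like the classic iterated-coalescing
   main loop shown here.  The real driver is ra_colorize_graph(); this
   fragment is a simplified assumption for exposition, reusing the worklist
   names defined in this file.  */
#if 0
  while (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_FAT) || WEBS(SIMPLIFY_SPILL)
         || mv_worklist || WEBS(FREEZE) || WEBS(SPILL))
    {
      if (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_FAT) || WEBS(SIMPLIFY_SPILL))
        simplify ();       /* Push trivially colorable webs onto SELECT.  */
      else if (mv_worklist)
        coalesce ();       /* Try to merge one move-related pair.  */
      else if (WEBS(FREEZE))
        freeze ();         /* Give up coalescing the moves of one web.  */
      else if (WEBS(SPILL))
        select_spill ();   /* Mark the cheapest web as a potential spill.  */
    }
  assign_colors ();        /* Pop the SELECT stack and assign hard regs.  */
#endif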
50 static void push_list (struct dlist *, struct dlist **);
51 static void push_list_end (struct dlist *, struct dlist **);
52 static void free_dlist (struct dlist **);
53 static void put_web_at_end (struct web *, enum ra_node_type);
54 static void put_move (struct move *, enum move_type);
55 static void build_worklists (struct df *);
56 static void enable_move (struct web *);
57 static void decrement_degree (struct web *, int);
58 static void simplify (void);
59 static void remove_move_1 (struct web *, struct move *);
60 static void remove_move (struct web *, struct move *);
61 static void add_worklist (struct web *);
62 static int ok (struct web *, struct web *);
63 static int conservative (struct web *, struct web *);
64 static inline unsigned int simplify_p (enum ra_node_type);
65 static void combine (struct web *, struct web *);
66 static void coalesce (void);
67 static void freeze_moves (struct web *);
68 static void freeze (void);
69 static void select_spill (void);
70 static int color_usable_p (int, HARD_REG_SET, HARD_REG_SET,
71 enum machine_mode);
72 int get_free_reg (HARD_REG_SET, HARD_REG_SET, enum machine_mode);
73 static int get_biased_reg (HARD_REG_SET, HARD_REG_SET, HARD_REG_SET,
74 HARD_REG_SET, enum machine_mode);
75 static int count_long_blocks (HARD_REG_SET, int);
76 static char * hardregset_to_string (HARD_REG_SET);
77 static void calculate_dont_begin (struct web *, HARD_REG_SET *);
78 static void colorize_one_web (struct web *, int);
79 static void assign_colors (void);
80 static void try_recolor_web (struct web *);
81 static void insert_coalesced_conflicts (void);
82 static int comp_webs_maxcost (const void *, const void *);
83 static void recolor_spills (void);
84 static void check_colors (void);
85 static void restore_conflicts_from_coalesce (struct web *);
86 static void break_coalesced_spills (void);
87 static void unalias_web (struct web *);
88 static void break_aliases_to_web (struct web *);
89 static void break_precolored_alias (struct web *);
90 static void init_web_pairs (void);
91 static void add_web_pair_cost (struct web *, struct web *,
92 unsigned HOST_WIDE_INT, unsigned int);
93 static int comp_web_pairs (const void *, const void *);
94 static void sort_and_combine_web_pairs (int);
95 static int ok_class (struct web *, struct web *);
96 static void aggressive_coalesce (void);
97 static void extended_coalesce_2 (void);
98 static void check_uncoalesced_moves (void);
100 static struct dlist *mv_worklist, *mv_coalesced, *mv_constrained;
101 static struct dlist *mv_frozen, *mv_active;
103 /* Push a node onto the front of the list. */
105 static void
106 push_list (struct dlist *x, struct dlist **list)
108 gcc_assert (!x->next);
109 gcc_assert (!x->prev);
110 x->next = *list;
111 if (*list)
112 (*list)->prev = x;
113 *list = x;
116 static void
117 push_list_end (struct dlist *x, struct dlist **list)
119 gcc_assert (!x->prev);
120 gcc_assert (!x->next);
121 if (!*list)
123 *list = x;
124 return;
126 while ((*list)->next)
127 list = &((*list)->next);
128 x->prev = *list;
129 (*list)->next = x;
132 /* Remove a node from the list. */
134 void
135 remove_list (struct dlist *x, struct dlist **list)
137 struct dlist *y = x->prev;
138 if (y)
139 y->next = x->next;
140 else
141 *list = x->next;
142 y = x->next;
143 if (y)
144 y->prev = x->prev;
145 x->next = x->prev = NULL;
148 /* Pop the front of the list. */
150 struct dlist *
151 pop_list (struct dlist **list)
153 struct dlist *r = *list;
154 if (r)
155 remove_list (r, list);
156 return r;
159 /* Free the given double linked list. */
161 static void
162 free_dlist (struct dlist **list)
164 *list = NULL;
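/* Usage sketch (illustrative only, not from the original sources) of the
   dlist primitives above: webs and moves carry a `dlink' node which is
   moved between worklists roughly as follows.  The real code goes through
   put_web()/remove_web_from_list() so that web->type stays in sync.  */
#if 0
  struct dlist *d = web->dlink;
  remove_list (d, &WEBS(FREEZE));     /* Unlink from its current list.  */
  push_list (d, &WEBS(SIMPLIFY));     /* Make it the new list's head.  */
  d = pop_list (&WEBS(SIMPLIFY));     /* And take it off again.  */
#endif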
167 /* The web WEB should get the given new TYPE. Put it onto the
168 appropriate list.
169 Inline, because it's called with constant TYPE every time. */
171 inline void
172 put_web (struct web *web, enum ra_node_type type)
174 switch (type)
176 case INITIAL:
177 case FREE:
178 case FREEZE:
179 case SPILL:
180 case SPILLED:
181 case COALESCED:
182 case COLORED:
183 case SELECT:
184 push_list (web->dlink, &WEBS(type));
185 break;
186 case PRECOLORED:
187 push_list (web->dlink, &WEBS(INITIAL));
188 break;
189 case SIMPLIFY:
190 if (web->spill_temp)
191 push_list (web->dlink, &WEBS(type = SIMPLIFY_SPILL));
192 else if (web->add_hardregs)
193 push_list (web->dlink, &WEBS(type = SIMPLIFY_FAT));
194 else
195 push_list (web->dlink, &WEBS(SIMPLIFY));
196 break;
197 default:
198 gcc_unreachable ();
200 web->type = type;
203 /* After we are done with the whole pass of coloring/spilling,
204 we reset the lists of webs in preparation for the next pass.
205 The spilled webs become free, colored webs go to the initial list,
206 coalesced webs become free or initial, according to what type of web
207 they are coalesced to. */
209 void
210 reset_lists (void)
212 struct dlist *d;
214 gcc_assert (!WEBS(SIMPLIFY));
215 gcc_assert (!WEBS(SIMPLIFY_SPILL));
216 gcc_assert (!WEBS(SIMPLIFY_FAT));
217 gcc_assert (!WEBS(FREEZE));
218 gcc_assert (!WEBS(SPILL));
219 gcc_assert (!WEBS(SELECT));
221 while ((d = pop_list (&WEBS(COALESCED))) != NULL)
223 struct web *web = DLIST_WEB (d);
224 struct web *aweb = alias (web);
225 /* Note how alias() becomes invalid through the two put_web()'s
226 below. It might set the type of a web to FREE (from COALESCED),
227 which itself is a target of aliasing (i.e. in the middle of
228 an alias chain). We can handle this by also checking for
229 type == FREE. Note nevertheless that alias() is invalid
230 henceforth. */
231 if (aweb->type == SPILLED || aweb->type == FREE)
232 put_web (web, FREE);
233 else
234 put_web (web, INITIAL);
236 while ((d = pop_list (&WEBS(SPILLED))) != NULL)
237 put_web (DLIST_WEB (d), FREE);
238 while ((d = pop_list (&WEBS(COLORED))) != NULL)
239 put_web (DLIST_WEB (d), INITIAL);
241 /* All free webs have no conflicts anymore. */
242 for (d = WEBS(FREE); d; d = d->next)
244 struct web *web = DLIST_WEB (d);
245 BITMAP_XFREE (web->useless_conflicts);
246 web->useless_conflicts = NULL;
249 #ifdef ENABLE_CHECKING
250 /* Sanity check, that we only have free, initial or precolored webs. */
252 unsigned int i;
254 for (i = 0; i < num_webs; i++)
256 struct web *web = ID2WEB (i);
258 gcc_assert (web->type == INITIAL || web->type == FREE
259 || web->type == PRECOLORED);
262 #endif
263 free_dlist (&mv_worklist);
264 free_dlist (&mv_coalesced);
265 free_dlist (&mv_constrained);
266 free_dlist (&mv_frozen);
267 free_dlist (&mv_active);
270 /* Similar to put_web(), but add the web to the end of the appropriate
271 list. Additionally TYPE may not be SIMPLIFY. */
273 static void
274 put_web_at_end (struct web *web, enum ra_node_type type)
276 if (type == PRECOLORED)
277 type = INITIAL;
278 else
279 gcc_assert (type != SIMPLIFY);
280 push_list_end (web->dlink, &WEBS(type));
281 web->type = type;
284 /* Unlink WEB from the list it's currently on (which corresponds to
285 its current type). */
287 void
288 remove_web_from_list (struct web *web)
290 if (web->type == PRECOLORED)
291 remove_list (web->dlink, &WEBS(INITIAL));
292 else
293 remove_list (web->dlink, &WEBS(web->type));
296 /* Give MOVE the TYPE, and link it into the correct list. */
298 static inline void
299 put_move (struct move *move, enum move_type type)
301 switch (type)
303 case WORKLIST:
304 push_list (move->dlink, &mv_worklist);
305 break;
306 case MV_COALESCED:
307 push_list (move->dlink, &mv_coalesced);
308 break;
309 case CONSTRAINED:
310 push_list (move->dlink, &mv_constrained);
311 break;
312 case FROZEN:
313 push_list (move->dlink, &mv_frozen);
314 break;
315 case ACTIVE:
316 push_list (move->dlink, &mv_active);
317 break;
318 default:
319 gcc_unreachable ();
321 move->type = type;
324 /* Build the worklists we are going to process. */
326 static void
327 build_worklists (struct df *df ATTRIBUTE_UNUSED)
329 struct dlist *d, *d_next;
330 struct move_list *ml;
332 /* If this is not the first pass, put all stackwebs (which are still
333 backed by a new pseudo, but conceptually can stand for a stackslot,
334 i.e. it doesn't really matter whether they get a color or not) on
335 the SELECT stack first, those with the lowest cost first. This way
336 they will be colored last, so they do not constrain the coloring of the
337 normal webs. But those with the highest cost are still colored
338 earlier, i.e. are more likely to get a color. The use of stackregs is
339 a pure optimization; everything would work if we used real stackslots
340 from the beginning. */
341 if (ra_pass > 1)
343 unsigned int i, num, max_num;
344 struct web **order2web;
345 max_num = num_webs - num_subwebs;
346 order2web = xmalloc (max_num * sizeof (order2web[0]));
347 for (i = 0, num = 0; i < max_num; i++)
348 if (id2web[i]->regno >= max_normal_pseudo)
349 order2web[num++] = id2web[i];
350 if (num)
352 qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost);
353 for (i = num - 1;; i--)
355 struct web *web = order2web[i];
356 struct conflict_link *wl;
357 remove_list (web->dlink, &WEBS(INITIAL));
358 put_web (web, SELECT);
359 for (wl = web->conflict_list; wl; wl = wl->next)
361 struct web *pweb = wl->t;
362 pweb->num_conflicts -= 1 + web->add_hardregs;
364 if (i == 0)
365 break;
368 free (order2web);
371 /* For all remaining initial webs, classify them. */
372 for (d = WEBS(INITIAL); d; d = d_next)
374 struct web *web = DLIST_WEB (d);
375 d_next = d->next;
376 if (web->type == PRECOLORED)
377 continue;
379 remove_list (d, &WEBS(INITIAL));
380 if (web->num_conflicts >= NUM_REGS (web))
381 put_web (web, SPILL);
382 else if (web->moves)
383 put_web (web, FREEZE);
384 else
385 put_web (web, SIMPLIFY);
388 /* And put all moves on the worklist for iterated coalescing.
389 Note, that if iterated coalescing is off, then wl_moves doesn't
390 contain any moves. */
391 for (ml = wl_moves; ml; ml = ml->next)
392 if (ml->move)
394 struct move *m = ml->move;
395 d = ra_calloc (sizeof (struct dlist));
396 DLIST_MOVE (d) = m;
397 m->dlink = d;
398 put_move (m, WORKLIST);
402 /* Enable the active moves, in which WEB takes part, to be processed. */
404 static void
405 enable_move (struct web *web)
407 struct move_list *ml;
408 for (ml = web->moves; ml; ml = ml->next)
409 if (ml->move->type == ACTIVE)
411 remove_list (ml->move->dlink, &mv_active);
412 put_move (ml->move, WORKLIST);
416 /* Decrement the degree of node WEB by the amount DEC.
417 Possibly change the type of WEB, if the number of conflicts is
418 now smaller than its freedom. */
420 static void
421 decrement_degree (struct web *web, int dec)
423 int before = web->num_conflicts;
424 web->num_conflicts -= dec;
425 if (web->num_conflicts < NUM_REGS (web) && before >= NUM_REGS (web))
427 struct conflict_link *a;
428 enable_move (web);
429 for (a = web->conflict_list; a; a = a->next)
431 struct web *aweb = a->t;
432 if (aweb->type != SELECT && aweb->type != COALESCED)
433 enable_move (aweb);
435 if (web->type != FREEZE)
437 remove_web_from_list (web);
438 if (web->moves)
439 put_web (web, FREEZE);
440 else
441 put_web (web, SIMPLIFY);
446 /* Repeatedly simplify the nodes on the simplify worklists. */
448 static void
449 simplify (void)
451 struct dlist *d;
452 struct web *web;
453 struct conflict_link *wl;
454 while (1)
456 /* We try hard to color all the webs resulting from spills first.
457 Without that, on register-starved machines (e.g. x86) with some live
458 DImode pseudos, -fPIC, and an asm requiring %edx, it might happen that
459 we do round after round, because the conflict graph says we can
460 simplify those short webs, but later, due to irregularities, we can't
461 color those pseudos. So we have to spill them, which in later rounds
462 leads to other spills. */
463 d = pop_list (&WEBS(SIMPLIFY));
464 if (!d)
465 d = pop_list (&WEBS(SIMPLIFY_FAT));
466 if (!d)
467 d = pop_list (&WEBS(SIMPLIFY_SPILL));
468 if (!d)
469 break;
470 web = DLIST_WEB (d);
471 ra_debug_msg (DUMP_PROCESS, " simplifying web %3d, conflicts = %d\n",
472 web->id, web->num_conflicts);
473 put_web (web, SELECT);
474 for (wl = web->conflict_list; wl; wl = wl->next)
476 struct web *pweb = wl->t;
477 if (pweb->type != SELECT && pweb->type != COALESCED)
479 decrement_degree (pweb, 1 + web->add_hardregs);
485 /* Helper function to remove a move from the movelist of the web. */
487 static void
488 remove_move_1 (struct web *web, struct move *move)
490 struct move_list *ml = web->moves;
491 if (!ml)
492 return;
493 if (ml->move == move)
495 web->moves = ml->next;
496 return;
498 for (; ml->next && ml->next->move != move; ml = ml->next) ;
499 if (!ml->next)
500 return;
501 ml->next = ml->next->next;
504 /* Remove a move from the movelist of the web. Actually this is just a
505 wrapper around remove_move_1(), making sure the removed move really is
506 not in the list anymore. */
508 static void
509 remove_move (struct web *web, struct move *move)
511 struct move_list *ml;
512 remove_move_1 (web, move);
513 for (ml = web->moves; ml; ml = ml->next)
514 gcc_assert (ml->move != move);
517 /* Merge the moves for the two webs into the first web's movelist. */
519 void
520 merge_moves (struct web *u, struct web *v)
522 regset seen;
523 struct move_list *ml, *ml_next;
525 seen = BITMAP_XMALLOC ();
526 for (ml = u->moves; ml; ml = ml->next)
527 bitmap_set_bit (seen, INSN_UID (ml->move->insn));
528 for (ml = v->moves; ml; ml = ml_next)
530 ml_next = ml->next;
531 if (! bitmap_bit_p (seen, INSN_UID (ml->move->insn)))
533 ml->next = u->moves;
534 u->moves = ml;
537 BITMAP_XFREE (seen);
538 v->moves = NULL;
541 /* Add a web to the simplify worklist, from the freeze worklist. */
543 static void
544 add_worklist (struct web *web)
546 if (web->type != PRECOLORED && !web->moves
547 && web->num_conflicts < NUM_REGS (web))
549 remove_list (web->dlink, &WEBS(FREEZE));
550 put_web (web, SIMPLIFY);
554 /* Precolored node coalescing heuristic. */
556 static int
557 ok (struct web *target, struct web *source)
559 struct conflict_link *wl;
560 int i;
561 int color = source->color;
562 int size;
564 /* Normally one would think the next test wouldn't be needed.
565 We try to coalesce S and T, S already has a color, and we checked
566 when processing the insns that both have the same mode. So naively
567 we could conclude that of course that mode was valid for this color.
568 Hah. But there is sparc. Before reload there are copy insns
569 (e.g. the ones copying arguments to locals) which happily refer to
570 colors in invalid modes. We can't coalesce those things. */
571 if (! HARD_REGNO_MODE_OK (source->color, GET_MODE (target->orig_x)))
572 return 0;
574 /* Sanity for funny modes. */
575 size = hard_regno_nregs[color][GET_MODE (target->orig_x)];
576 if (!size)
577 return 0;
579 /* We can't coalesce target with a precolored register which isn't in
580 usable_regs. */
581 for (i = size; i--;)
582 if (TEST_HARD_REG_BIT (never_use_colors, color + i)
583 || !TEST_HARD_REG_BIT (target->usable_regs, color + i)
584 /* Before calling ok() at all, we usually already test whether the
585 candidates conflict in sup_igraph. But when wide webs are
586 coalesced to hardregs, we only test the hardweb coalesced into.
587 This is only the begin color. When actually coalescing both,
588 it will also take the following size colors, i.e. their webs.
589 We never checked whether the candidate possibly conflicts with
590 one of _those_, which is possible with partial conflicts,
591 so we simply do it here (this does one bit-test more than
592 necessary, the first color). Note that if X is precolored,
593 bit [X*num_webs + Y] can't be set (see add_conflict_edge()). */
594 || TEST_BIT (sup_igraph,
595 target->id * num_webs + hardreg2web[color + i]->id))
596 return 0;
598 for (wl = target->conflict_list; wl; wl = wl->next)
600 struct web *pweb = wl->t;
601 if (pweb->type == SELECT || pweb->type == COALESCED)
602 continue;
604 /* Coalescing target (T) and source (S) is OK if for
605 all conflicts C of T it is true that:
606 1) C will be colored, or
607 2) C is a hardreg (precolored), or
608 3) C already conflicts with S too, or
609 4) a web which contains C already conflicts with S.
610 XXX: we handle here only the special case of 4), where C is
611 a subreg and the containing thing is the reg itself, i.e.
612 we don't handle the situation where T conflicts with
613 (subreg:SI x 1) and S conflicts with (subreg:DI x 0), which
614 would also be allowed, as the S-conflict overlaps
615 the T-conflict.
616 So, we first test the whole web for any of these conditions, and
617 continue with the next C if 1, 2 or 3 is true. */
618 if (pweb->num_conflicts < NUM_REGS (pweb)
619 || pweb->type == PRECOLORED
620 || TEST_BIT (igraph, igraph_index (source->id, pweb->id)) )
621 continue;
623 /* This is reached if none of 1, 2 or 3 was true. In the case that C has
624 no subwebs, 4 can't be true either, so we can't coalesce S and T. */
625 if (wl->sub == NULL)
626 return 0;
627 else
629 /* The main webs do _not_ conflict, only some parts of both. This
630 means, that 4 is possibly true, so we need to check this too.
631 For this we go through all sub conflicts between T and C, and see if
632 the target part of C already conflicts with S. When this is not
633 the case we disallow coalescing. */
634 struct sub_conflict *sl;
635 for (sl = wl->sub; sl; sl = sl->next)
637 if (!TEST_BIT (igraph, igraph_index (source->id, sl->t->id)))
638 return 0;
642 return 1;
645 /* Non-precolored node coalescing heuristic. */
647 static int
648 conservative (struct web *target, struct web *source)
650 unsigned int k;
651 unsigned int loop;
652 regset seen;
653 struct conflict_link *wl;
654 unsigned int num_regs = NUM_REGS (target); /* XXX */
656 /* k counts the conflict weight that would result if target and source
657 were merged and all low-degree neighbors were
658 removed. */
659 k = 0 * MAX (target->add_hardregs, source->add_hardregs);
660 seen = BITMAP_XMALLOC ();
661 for (loop = 0; loop < 2; loop++)
662 for (wl = ((loop == 0) ? target : source)->conflict_list;
663 wl; wl = wl->next)
665 struct web *pweb = wl->t;
666 if (pweb->type != SELECT && pweb->type != COALESCED
667 && pweb->num_conflicts >= NUM_REGS (pweb)
668 && ! REGNO_REG_SET_P (seen, pweb->id))
670 SET_REGNO_REG_SET (seen, pweb->id);
671 k += 1 + pweb->add_hardregs;
674 BITMAP_XFREE (seen);
676 if (k >= num_regs)
677 return 0;
678 return 1;
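/* Worked example of the conservative (Briggs) test above, with made-up
   numbers: say NUM_REGS (target) is 4 and the union of TARGET's and
   SOURCE's neighbors contains three webs of significant degree
   (num_conflicts >= their own NUM_REGS), none needing extra hardregs.
   Then k = 3 < 4, so the coalesce is allowed: even if each of those
   high-degree neighbors later blocks one color, a color is still left
   for the merged web.  */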
681 /* If the web is coalesced, return its alias. Otherwise, return what
682 was passed in. */
684 struct web *
685 alias (struct web *web)
687 while (web->type == COALESCED)
688 web = web->alias;
689 return web;
692 /* Returns nonzero if TYPE is one of the types representing a
693 SIMPLIFY worklist. */
695 static inline unsigned int
696 simplify_p (enum ra_node_type type)
698 return type == SIMPLIFY || type == SIMPLIFY_SPILL || type == SIMPLIFY_FAT;
701 /* Actually combine two webs that can be coalesced. */
703 static void
704 combine (struct web *u, struct web *v)
706 int i;
707 struct conflict_link *wl;
708 gcc_assert (u != v);
709 gcc_assert (v->type != COALESCED);
710 gcc_assert ((u->regno >= max_normal_pseudo)
711 == (v->regno >= max_normal_pseudo));
712 remove_web_from_list (v);
713 put_web (v, COALESCED);
714 v->alias = u;
715 u->is_coalesced = 1;
716 v->is_coalesced = 1;
717 u->num_aliased += 1 + v->num_aliased;
718 if (flag_ra_merge_spill_costs && u->type != PRECOLORED)
719 u->spill_cost += v->spill_cost;
720 /*u->spill_cost = MAX (u->spill_cost, v->spill_cost);*/
721 merge_moves (u, v);
722 /* combine add_hardregs's of U and V. */
724 for (wl = v->conflict_list; wl; wl = wl->next)
726 struct web *pweb = wl->t;
727 /* We don't strictly need to move conflicts between webs which are
728 already coalesced or selected, if we do iterated coalescing, or
729 more precisely if we don't need to be able to break aliases again.
730 I.e. normally we would use the condition
731 (pweb->type != SELECT && pweb->type != COALESCED).
732 But for now we simply merge all conflicts. It doesn't take that
733 much time. */
734 if (1)
736 struct web *web = u;
737 int nregs = 1 + v->add_hardregs;
738 if (u->type == PRECOLORED)
739 nregs = hard_regno_nregs[u->color][GET_MODE (v->orig_x)];
741 /* For precolored U's we need to make conflicts between V's
742 neighbors and as many hardregs from U as V needed if it gets
743 color U. For now we approximate this by V->add_hardregs, which
744 could be too much in multi-length classes. We should really
745 count how many hardregs are needed for V with color U. When U
746 isn't precolored this loop breaks out after one iteration. */
747 for (i = 0; i < nregs; i++)
749 if (u->type == PRECOLORED)
750 web = hardreg2web[i + u->color];
751 if (wl->sub == NULL)
752 record_conflict (web, pweb);
753 else
755 struct sub_conflict *sl;
756 /* So, between V and PWEB there are sub_conflicts. We
757 need to relocate those conflicts to be between WEB (==
758 U when it wasn't precolored) and PWEB. In the case
759 only a part of V conflicted with (part of) PWEB we
760 nevertheless make the new conflict between the whole U
761 and the (part of) PWEB. Later we might try to find in
762 U the correct subpart corresponding (by size and
763 offset) to the part of V (sl->s) which was the source
764 of the conflict. */
765 for (sl = wl->sub; sl; sl = sl->next)
767 /* Beware: sl->s is no subweb of web (== U) but of V.
768 We try to search a corresponding subpart of U.
769 If we found none we let it conflict with the whole U.
770 Note that find_subweb() only looks for mode and
771 subreg_byte of the REG rtx but not for the pseudo
772 reg number (otherwise it would be guaranteed to
773 _not_ find any subpart). */
774 struct web *sweb = NULL;
775 if (SUBWEB_P (sl->s))
776 sweb = find_subweb (web, sl->s->orig_x);
777 if (!sweb)
778 sweb = web;
779 record_conflict (sweb, sl->t);
782 if (u->type != PRECOLORED)
783 break;
785 if (pweb->type != SELECT && pweb->type != COALESCED)
786 decrement_degree (pweb, 1 + v->add_hardregs);
790 /* Now merge the usable_regs together. */
791 /* XXX That merging might normally make it necessary to
792 adjust add_hardregs, which also means adjusting neighbors. This can
793 result in making some more webs trivially colorable (or the opposite,
794 if this increases our add_hardregs). Because we intersect the
795 usable_regs it should only be possible to decrease add_hardregs. So a
796 conservative solution for now is to simply not change it. */
797 u->use_my_regs = 1;
798 AND_HARD_REG_SET (u->usable_regs, v->usable_regs);
799 u->regclass = reg_class_subunion[u->regclass][v->regclass];
800 /* Count number of possible hardregs. This might make U a spillweb,
801 but that could also happen, if U and V together had too many
802 conflicts. */
803 u->num_freedom = hard_regs_count (u->usable_regs);
804 u->num_freedom -= u->add_hardregs;
805 /* The next assert checks for an invalid coalesced move (both webs
806 must have possible hardregs in common). */
807 gcc_assert (u->num_freedom);
809 if (u->num_conflicts >= NUM_REGS (u)
810 && (u->type == FREEZE || simplify_p (u->type)))
812 remove_web_from_list (u);
813 put_web (u, SPILL);
816 /* We want the most relaxed combination of spill_temp state.
817 I.e. if either was no spilltemp or a spilltemp2, the result is so too,
818 otherwise if either is short, the result is too. It remains unchanged
819 only when both are normal spilltemps. */
820 if (v->spill_temp == 0)
821 u->spill_temp = 0;
822 else if (v->spill_temp == 2 && u->spill_temp != 0)
823 u->spill_temp = 2;
824 else if (v->spill_temp == 3 && u->spill_temp == 1)
825 u->spill_temp = 3;
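/* Combination rule above illustrated (assuming the encoding implied by the
   comment: 0 = no spilltemp, 1 = normal spilltemp, 2 = spilltemp2,
   3 = short): combining a normal spilltemp U (1) with a non-spilltemp
   V (0) yields 0, with a spilltemp2 V (2) yields 2, with a short V (3)
   yields 3; only two normal spilltemps stay at 1.  */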
828 /* Attempt to coalesce the first thing on the move worklist.
829 This is used only for iterated coalescing. */
831 static void
832 coalesce (void)
834 struct dlist *d = pop_list (&mv_worklist);
835 struct move *m = DLIST_MOVE (d);
836 struct web *source = alias (m->source_web);
837 struct web *target = alias (m->target_web);
839 if (target->type == PRECOLORED)
841 struct web *h = source;
842 source = target;
843 target = h;
845 if (source == target)
847 remove_move (source, m);
848 put_move (m, MV_COALESCED);
849 add_worklist (source);
851 else if (target->type == PRECOLORED
852 || TEST_BIT (sup_igraph, source->id * num_webs + target->id)
853 || TEST_BIT (sup_igraph, target->id * num_webs + source->id)
854 || !ok_class (target, source))
856 remove_move (source, m);
857 remove_move (target, m);
858 put_move (m, CONSTRAINED);
859 add_worklist (source);
860 add_worklist (target);
862 else if ((source->type == PRECOLORED && ok (target, source))
863 || (source->type != PRECOLORED
864 && conservative (target, source)))
866 remove_move (source, m);
867 remove_move (target, m);
868 put_move (m, MV_COALESCED);
869 combine (source, target);
870 add_worklist (source);
872 else
873 put_move (m, ACTIVE);
876 /* Freeze the moves associated with the web. Used for iterated coalescing. */
878 static void
879 freeze_moves (struct web *web)
881 struct move_list *ml, *ml_next;
882 for (ml = web->moves; ml; ml = ml_next)
884 struct move *m = ml->move;
885 struct web *src, *dest;
886 ml_next = ml->next;
887 if (m->type == ACTIVE)
888 remove_list (m->dlink, &mv_active);
889 else
890 remove_list (m->dlink, &mv_worklist);
891 put_move (m, FROZEN);
892 remove_move (web, m);
893 src = alias (m->source_web);
894 dest = alias (m->target_web);
895 src = (src == web) ? dest : src;
896 remove_move (src, m);
897 /* XXX GA use the original v, instead of alias(v) */
898 if (!src->moves && src->num_conflicts < NUM_REGS (src))
900 remove_list (src->dlink, &WEBS(FREEZE));
901 put_web (src, SIMPLIFY);
906 /* Freeze the first thing on the freeze worklist (only for iterated
907 coalescing). */
909 static void
910 freeze (void)
912 struct dlist *d = pop_list (&WEBS(FREEZE));
913 put_web (DLIST_WEB (d), SIMPLIFY);
914 freeze_moves (DLIST_WEB (d));
917 /* The current spill heuristic. Returns a number for a WEB.
918 Webs with higher numbers are selected later. */
920 static unsigned HOST_WIDE_INT (*spill_heuristic) (struct web *);
922 static unsigned HOST_WIDE_INT default_spill_heuristic (struct web *);
924 /* Our default heuristic is similar to spill_cost / num_conflicts.
925 Just scaled for integer arithmetic, and it favors coalesced webs,
926 and webs which span more insns with deaths. */
928 static unsigned HOST_WIDE_INT
929 default_spill_heuristic (struct web *web)
931 unsigned HOST_WIDE_INT ret;
932 unsigned int divisor = 1;
933 /* Make coalesce targets cheaper to spill, because they will be broken
934 up again into smaller parts. */
935 if (flag_ra_break_aliases)
936 divisor += web->num_aliased;
937 divisor += web->num_conflicts;
938 ret = ((web->spill_cost << 8) + divisor - 1) / divisor;
939 /* It is better to spill webs that span more insns (deaths in our
940 case) than other webs with the otherwise same spill_cost. So make
941 them a little bit cheaper. Remember that spill_cost is unsigned. */
942 if (web->span_deaths < ret)
943 ret -= web->span_deaths;
944 return ret;
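/* Worked example with made-up numbers: a web with spill_cost 10, 4
   conflicts, 2 aliased webs (with flag_ra_break_aliases set) and 3 spanned
   deaths gets divisor = 1 + 2 + 4 = 7, so
   ret = ((10 << 8) + 7 - 1) / 7 = 2566 / 7 = 366, minus span_deaths (3),
   i.e. 363.  A web with the same spill_cost but more conflicts or more
   spanned deaths therefore looks cheaper and is preferred as the
   potential spill.  */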
947 /* Select the cheapest web to be potentially spilled (we don't
948 *actually* spill until we need to). */
950 static void
951 select_spill (void)
953 unsigned HOST_WIDE_INT best = (unsigned HOST_WIDE_INT) -1;
954 struct dlist *bestd = NULL;
955 unsigned HOST_WIDE_INT best2 = (unsigned HOST_WIDE_INT) -1;
956 struct dlist *bestd2 = NULL;
957 struct dlist *d;
958 for (d = WEBS(SPILL); d; d = d->next)
960 struct web *w = DLIST_WEB (d);
961 unsigned HOST_WIDE_INT cost = spill_heuristic (w);
962 if ((!w->spill_temp) && cost < best)
964 best = cost;
965 bestd = d;
967 /* Specially marked spill temps can be spilled. Also coalesce
968 targets can. Eventually they will be broken up later in the
969 colorizing process, so if we have nothing better take that. */
970 else if ((w->spill_temp == 2 || w->is_coalesced) && cost < best2)
972 best2 = cost;
973 bestd2 = d;
976 if (!bestd)
978 bestd = bestd2;
979 best = best2;
981 gcc_assert (bestd);
983 /* Note the potential spill. */
984 DLIST_WEB (bestd)->was_spilled = 1;
985 remove_list (bestd, &WEBS(SPILL));
986 put_web (DLIST_WEB (bestd), SIMPLIFY);
987 freeze_moves (DLIST_WEB (bestd));
988 ra_debug_msg (DUMP_PROCESS, " potential spill web %3d, conflicts = %d\n",
989 DLIST_WEB (bestd)->id, DLIST_WEB (bestd)->num_conflicts);
992 /* Given a set of forbidden colors to begin at, and a set of still
993 free colors, and MODE, returns nonzero if color C is still usable. */
995 static int
996 color_usable_p (int c, HARD_REG_SET dont_begin_colors,
997 HARD_REG_SET free_colors, enum machine_mode mode)
999 if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
1000 && TEST_HARD_REG_BIT (free_colors, c)
1001 && HARD_REGNO_MODE_OK (c, mode))
1003 int i, size;
1004 size = hard_regno_nregs[c][mode];
1005 for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
1006 if (i == size)
1007 return 1;
1009 return 0;
1012 /* I don't want to clutter up the actual code with ifdef's. */
1013 #ifdef REG_ALLOC_ORDER
1014 #define INV_REG_ALLOC_ORDER(c) inv_reg_alloc_order[c]
1015 #else
1016 #define INV_REG_ALLOC_ORDER(c) c
1017 #endif
1019 /* Searches in FREE_COLORS for a block of hardregs of the right length
1020 for MODE, which doesn't begin at a hardreg mentioned in DONT_BEGIN_COLORS.
1021 If it needs more than one hardreg it prefers blocks beginning
1022 at an even hardreg, and only gives an odd begin reg if no other
1023 block could be found. */
1026 get_free_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors,
1027 enum machine_mode mode)
1029 int c;
1030 int last_resort_reg = -1;
1031 int pref_reg = -1;
1032 int pref_reg_order = INT_MAX;
1033 int last_resort_reg_order = INT_MAX;
1035 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
1036 if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
1037 && TEST_HARD_REG_BIT (free_colors, c)
1038 && HARD_REGNO_MODE_OK (c, mode))
1040 int i, size;
1041 size = hard_regno_nregs[c][mode];
1042 for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
1043 if (i != size)
1045 c += i;
1046 continue;
1048 if (i == size)
1050 if (size < 2 || (c & 1) == 0)
1052 if (INV_REG_ALLOC_ORDER (c) < pref_reg_order)
1054 pref_reg = c;
1055 pref_reg_order = INV_REG_ALLOC_ORDER (c);
1058 else if (INV_REG_ALLOC_ORDER (c) < last_resort_reg_order)
1060 last_resort_reg = c;
1061 last_resort_reg_order = INV_REG_ALLOC_ORDER (c);
1064 else
1065 c += i;
1067 return pref_reg >= 0 ? pref_reg : last_resort_reg;
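/* Illustrative example (made-up register file, assuming HARD_REGNO_MODE_OK
   allows all of it): suppose MODE needs two hard registers,
   DONT_BEGIN_COLORS is empty and FREE_COLORS contains regs 1, 2, 3, 6 and
   7.  Blocks of length two can begin at 1, 2 or 6; get_free_reg() prefers
   the even starts 2 and 6 (taking the one earliest in the allocation
   order, i.e. 2 without REG_ALLOC_ORDER) and would return the odd start 1
   only if no even-aligned block existed.  */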
1070 /* Similar to get_free_reg(), but first search in colors provided
1071 by BIAS _and_ PREFER_COLORS, then in BIAS alone, then in PREFER_COLORS
1072 alone, and only then for any free color. If flag_ra_biased is zero
1073 only do the last two steps. */
1075 static int
1076 get_biased_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET bias,
1077 HARD_REG_SET prefer_colors, HARD_REG_SET free_colors,
1078 enum machine_mode mode)
1080 int c = -1;
1081 HARD_REG_SET s;
1082 if (flag_ra_biased)
1084 COPY_HARD_REG_SET (s, dont_begin_colors);
1085 IOR_COMPL_HARD_REG_SET (s, bias);
1086 IOR_COMPL_HARD_REG_SET (s, prefer_colors);
1087 c = get_free_reg (s, free_colors, mode);
1088 if (c >= 0)
1089 return c;
1090 COPY_HARD_REG_SET (s, dont_begin_colors);
1091 IOR_COMPL_HARD_REG_SET (s, bias);
1092 c = get_free_reg (s, free_colors, mode);
1093 if (c >= 0)
1094 return c;
1096 COPY_HARD_REG_SET (s, dont_begin_colors);
1097 IOR_COMPL_HARD_REG_SET (s, prefer_colors);
1098 c = get_free_reg (s, free_colors, mode);
1099 if (c >= 0)
1100 return c;
1101 c = get_free_reg (dont_begin_colors, free_colors, mode);
1102 return c;
1105 /* Counts the number of non-overlapping bitblocks of length LEN
1106 in FREE_COLORS. */
1108 static int
1109 count_long_blocks (HARD_REG_SET free_colors, int len)
1111 int i, j;
1112 int count = 0;
1113 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1115 if (!TEST_HARD_REG_BIT (free_colors, i))
1116 continue;
1117 for (j = 1; j < len; j++)
1118 if (!TEST_HARD_REG_BIT (free_colors, i + j))
1119 break;
1120 /* Bits [i .. i+j-1] are free. */
1121 if (j == len)
1122 count++;
1123 i += j - 1;
1125 return count;
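/* Illustrative example: with FREE_COLORS = {0, 1, 2, 5, 6} and LEN = 2,
   the loop above finds the non-overlapping blocks {0,1} and {5,6} (reg 2
   alone cannot begin another block of length 2), so it returns 2.  */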
1128 /* Given a hardreg set S, return a string representing it.
1129 Either as 0/1 string, or as hex value depending on the implementation
1130 of hardreg sets. Note that this string is statically allocated. */
1132 static char *
1133 hardregset_to_string (HARD_REG_SET s)
1135 static char string[/*FIRST_PSEUDO_REGISTER + 30*/1024];
1136 #if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDEST_FAST_INT
1137 sprintf (string, HOST_WIDE_INT_PRINT_HEX, (HOST_WIDE_INT) s);
1138 #else
1139 char *c = string;
1140 int i,j;
1141 c += sprintf (c, "{ ");
1142 for (i = 0;i < HARD_REG_SET_LONGS; i++)
1144 for (j = 0; j < HOST_BITS_PER_WIDEST_FAST_INT; j++)
1145 c += sprintf (c, "%s", ( 1 << j) & s[i] ? "1" : "0");
1146 c += sprintf (c, "%s", i ? ", " : "");
1148 c += sprintf (c, " }");
1149 #endif
1150 return string;
1153 /* For WEB, look at its already colored neighbors, and calculate
1154 the set of hardregs which is not allowed as color for WEB. Place
1155 that set into *RESULT. Note that the set of forbidden begin colors
1156 is not the same as all colors taken up by neighbors. E.g. suppose
1157 two DImode webs, but only the lo-part from one conflicts with the
1158 hi-part from the other, and suppose the other gets colors 2 and 3
1159 (it needs two SImode hardregs). Now the first can also take color
1160 1 or 2, although in those cases there's a partial overlap. Only
1161 3 can't be used as begin color. */
1163 static void
1164 calculate_dont_begin (struct web *web, HARD_REG_SET *result)
1166 struct conflict_link *wl;
1167 HARD_REG_SET dont_begin;
1168 /* The bits set in dont_begin correspond to the hardregs, at which
1169 WEB may not begin. This differs from the set of _all_ hardregs which
1170 are taken by WEB's conflicts in the presence of wide webs, where only
1171 some parts conflict with others. */
1172 CLEAR_HARD_REG_SET (dont_begin);
1173 for (wl = web->conflict_list; wl; wl = wl->next)
1175 struct web *w;
1176 struct web *ptarget = alias (wl->t);
1177 struct sub_conflict *sl = wl->sub;
1178 w = sl ? sl->t : wl->t;
1179 while (w)
1181 if (ptarget->type == COLORED || ptarget->type == PRECOLORED)
1183 struct web *source = (sl) ? sl->s : web;
1184 unsigned int tsize = hard_regno_nregs[ptarget->color]
1185 [GET_MODE (w->orig_x)];
1186 /* ssize is only a first guess for the size. */
1187 unsigned int ssize = hard_regno_nregs[ptarget->color][GET_MODE
1188 (source->orig_x)];
1189 unsigned int tofs = 0;
1190 unsigned int sofs = 0;
1191 /* C1 and C2 can become negative, so unsigned
1192 would be wrong. */
1193 int c1, c2;
1195 if (SUBWEB_P (w)
1196 && GET_MODE_SIZE (GET_MODE (w->orig_x)) >= UNITS_PER_WORD)
1197 tofs = (SUBREG_BYTE (w->orig_x) / UNITS_PER_WORD);
1198 if (SUBWEB_P (source)
1199 && GET_MODE_SIZE (GET_MODE (source->orig_x))
1200 >= UNITS_PER_WORD)
1201 sofs = (SUBREG_BYTE (source->orig_x) / UNITS_PER_WORD);
1202 c1 = ptarget->color + tofs - sofs - ssize + 1;
1203 c2 = ptarget->color + tofs + tsize - 1 - sofs;
1204 if (c2 >= 0)
1206 if (c1 < 0)
1207 c1 = 0;
1208 /* Because ssize was only guessed above, which influenced our
1209 begin color (c1), we need an adjustment if for that color
1210 another size would be needed. This is done by moving
1211 c1 to a place where the last of the source's hardregs does not
1212 overlap the first of the target's colors. */
1213 while (c1 + sofs
1214 + hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1
1215 < ptarget->color + tofs)
1216 c1++;
1217 while (c1 > 0 && c1 + sofs
1218 + hard_regno_nregs[c1][GET_MODE (source->orig_x)] - 1
1219 > ptarget->color + tofs)
1220 c1--;
1221 for (; c1 <= c2; c1++)
1222 SET_HARD_REG_BIT (dont_begin, c1);
1225 /* The next if() is only true if there was no wl->sub at all, in
1226 which case we make only one pass through this loop with W being
1227 a whole web. */
1228 if (!sl)
1229 break;
1230 sl = sl->next;
1231 w = sl ? sl->t : NULL;
1234 COPY_HARD_REG_SET (*result, dont_begin);
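/* Numeric illustration of the introductory comment (made-up colors): WEB
   is DImode and only its lo-part conflicts with the hi-part of a DImode
   neighbor colored 2/3 (hi-part in hard reg 3).  The loop above then puts
   only begin color 3 into dont_begin: starting WEB at 1 (regs 1,2) or at
   2 (regs 2,3) never places WEB's lo-part on reg 3, so those begin colors
   stay allowed despite the partial overlap with the neighbor.  */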
1237 /* Try to assign a color to WEB. If HARD is nonzero, we try many
1238 tricks to get it one color, including respilling already colored
1239 neighbors.
1241 We also try very hard not to constrain the uncolored non-spill
1242 neighbors, which need more hardregs than we do. Consider a situation: 2
1243 hardregs are free for us (0 and 1), and one of our neighbors needs 2
1244 hardregs, and only conflicts with us. There are 3 hardregs in total. Now
1245 a simple-minded method might choose 1 as color for us. Then our neighbor
1246 has two free colors (0 and 2) as it should, but they are not consecutive,
1247 so coloring it later would fail. This leads to nasty problems on
1248 register-starved machines, so we try to avoid this. */
1250 static void
1251 colorize_one_web (struct web *web, int hard)
1253 struct conflict_link *wl;
1254 HARD_REG_SET colors, dont_begin;
1255 int c = -1;
1256 int bestc = -1;
1257 int neighbor_needs= 0;
1258 struct web *fats_parent = NULL;
1259 int num_fat = 0;
1260 int long_blocks = 0;
1261 int best_long_blocks = -1;
1262 HARD_REG_SET fat_colors;
1263 HARD_REG_SET bias;
1265 CLEAR_HARD_REG_SET (fat_colors);
1267 if (web->regno >= max_normal_pseudo)
1268 hard = 0;
1270 /* First we want to know the colors at which we can't begin. */
1271 calculate_dont_begin (web, &dont_begin);
1272 CLEAR_HARD_REG_SET (bias);
1274 /* Now set up the set of colors used by our neighbors' neighbors,
1275 and search for the biggest uncolored neighbor. */
1276 neighbor_needs = web->add_hardregs + 1;
1277 for (wl = web->conflict_list; wl; wl = wl->next)
1279 struct web *w;
1280 struct web *ptarget = alias (wl->t);
1281 struct sub_conflict *sl = wl->sub;
1282 IOR_HARD_REG_SET (bias, ptarget->bias_colors);
1283 w = sl ? sl->t : wl->t;
1284 if (ptarget->type != COLORED && ptarget->type != PRECOLORED
1285 && !ptarget->was_spilled)
1286 while (w)
1288 if (find_web_for_subweb (w)->type != COALESCED
1289 && w->add_hardregs >= neighbor_needs)
1291 neighbor_needs = w->add_hardregs;
1292 fats_parent = ptarget;
1293 num_fat++;
1295 if (!sl)
1296 break;
1297 sl = sl->next;
1298 w = sl ? sl->t : NULL;
1302 ra_debug_msg (DUMP_COLORIZE, "colorize web %d [don't begin at %s]", web->id,
1303 hardregset_to_string (dont_begin));
1305 /* If there are some fat neighbors, remember their usable regs,
1306 and how many blocks are free in it for that neighbor. */
1307 if (num_fat)
1309 COPY_HARD_REG_SET (fat_colors, fats_parent->usable_regs);
1310 long_blocks = count_long_blocks (fat_colors, neighbor_needs + 1);
1313 /* We break out, if we found a color which doesn't constrain
1314 neighbors, or if we can't find any colors. */
1315 while (1)
1317 HARD_REG_SET call_clobbered;
1319 /* Here we choose a hard-reg for the current web. For non-spill
1320 temporaries we first search in the hardregs of its preferred
1321 class, then, if we found nothing appropriate, in those of the
1322 alternate class. For spill temporaries we only search in the
1323 usable_regs of this web (which is probably larger than that of
1324 the preferred or alternate class). All searches first try to
1325 find a non-call-clobbered hard-reg.
1326 XXX this should be more fine-grained... First look into preferred
1327 non-call-clobbered hardregs, then, _if_ the web crosses calls, in
1328 alternate non-cc hardregs, and only _then_ also in preferred cc
1329 hardregs (and alternate ones). Currently we don't track the number
1330 of calls crossed for webs. We should. */
1331 if (web->use_my_regs)
1333 COPY_HARD_REG_SET (colors, web->usable_regs);
1334 AND_HARD_REG_SET (colors,
1335 usable_regs[reg_preferred_class (web->regno)]);
1337 else
1338 COPY_HARD_REG_SET (colors,
1339 usable_regs[reg_preferred_class (web->regno)]);
1340 #ifdef CANNOT_CHANGE_MODE_CLASS
1341 if (web->mode_changed)
1342 AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs);
1343 #endif
1344 COPY_HARD_REG_SET (call_clobbered, colors);
1345 AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
1347 /* If this web got a color in the last pass, try to give it the
1348 same color again. This will lead to much better colorization
1349 down the line, as we spilled for a certain coloring last time. */
1350 if (web->old_color)
1352 c = web->old_color - 1;
1353 if (!color_usable_p (c, dont_begin, colors,
1354 PSEUDO_REGNO_MODE (web->regno)))
1355 c = -1;
1357 else
1358 c = -1;
1359 if (c < 0)
1360 c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1361 call_clobbered, PSEUDO_REGNO_MODE (web->regno));
1362 if (c < 0)
1363 c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1364 colors, PSEUDO_REGNO_MODE (web->regno));
1366 if (c < 0)
1368 if (web->use_my_regs)
1369 IOR_HARD_REG_SET (colors, web->usable_regs);
1370 else
1371 IOR_HARD_REG_SET (colors, usable_regs
1372 [reg_alternate_class (web->regno)]);
1373 #ifdef CANNOT_CHANGE_MODE_CLASS
1374 if (web->mode_changed)
1375 AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs);
1376 #endif
1377 COPY_HARD_REG_SET (call_clobbered, colors);
1378 AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
1380 c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1381 call_clobbered, PSEUDO_REGNO_MODE (web->regno));
1382 if (c < 0)
1383 c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1384 colors, PSEUDO_REGNO_MODE (web->regno));
1386 if (c < 0)
1387 break;
1388 if (bestc < 0)
1389 bestc = c;
1390 /* If one of the yet uncolored neighbors which is not a potential
1391 spill needs a block of hardregs, be sure not to destroy such a block
1392 by coloring one reg in the middle. */
1393 if (num_fat)
1395 int i;
1396 int new_long;
1397 HARD_REG_SET colors1;
1398 COPY_HARD_REG_SET (colors1, fat_colors);
1399 for (i = 0; i < 1 + web->add_hardregs; i++)
1400 CLEAR_HARD_REG_BIT (colors1, c + i);
1401 new_long = count_long_blocks (colors1, neighbor_needs + 1);
1402 /* If we changed the number of long blocks, and it's now smaller
1403 than needed, we try to avoid this color. */
1404 if (long_blocks != new_long && new_long < num_fat)
1406 if (new_long > best_long_blocks)
1408 best_long_blocks = new_long;
1409 bestc = c;
1411 SET_HARD_REG_BIT (dont_begin, c);
1412 ra_debug_msg (DUMP_COLORIZE, " avoid %d", c);
1414 else
1415 /* We found a color which doesn't destroy a block. */
1416 break;
1418 /* If we have no fat neighbors, the current color won't become
1419 "better", so we've found it. */
1420 else
1421 break;
1423 ra_debug_msg (DUMP_COLORIZE, " --> got %d", c < 0 ? bestc : c);
1424 if (bestc >= 0 && c < 0 && !web->was_spilled)
1426 /* This is a non-potential-spill web, which got a color that
1427 destroyed a hardreg block for one of its neighbors. We color
1428 this web anyway and hope for the best for the neighbor, if we are
1429 a spill temp. */
1430 if (1 || web->spill_temp)
1431 c = bestc;
1432 ra_debug_msg (DUMP_COLORIZE, " [constrains neighbors]");
1434 ra_debug_msg (DUMP_COLORIZE, "\n");
1436 if (c < 0)
1438 /* Guard against a simplified node being spilled. */
1439 /* Don't assert. This can happen when e.g. enough registers
1440 are available in colors, but they are not consecutive. This is a
1441 very serious issue if this web is a short-lived one, because
1442 even if we spill this one here, the situation won't become better
1443 in the next iteration. It probably will have the same conflicts,
1444 those will have the same colors, and we would come here again, for
1445 all parts into which this one gets split by the spill. This
1446 can result in endless iteration spilling the same register again and
1447 again. That's why we try to find a neighbor which spans more
1448 instructions than ourself, and got a color, and try to spill _that_.
1450 gcc_assert (DLIST_WEB (d)->was_spilled >= 0); */
1451 if (hard && (!web->was_spilled || web->spill_temp))
1453 unsigned int loop;
1454 struct web *try = NULL;
1455 struct web *candidates[8];
1457 ra_debug_msg (DUMP_COLORIZE, " *** %d spilled, although %s ***\n",
1458 web->id, web->spill_temp ? "spilltemp" : "non-spill");
1459 /* We make multiple passes over our conflicts, first trying to
1460 spill those webs which only got a color by chance, but
1461 were potential spill ones, and if that isn't enough, in a second
1462 pass also spilling normally colored webs. If we still didn't find
1463 a candidate, but we are a spill-temp, we make a third pass
1464 and also include webs which were targets for coalescing, and
1465 spill those. */
1466 memset (candidates, 0, sizeof candidates);
1467 #define set_cand(i, w) \
1468 do { \
1469 if (!candidates[(i)] \
1470 || (candidates[(i)]->spill_cost < (w)->spill_cost)) \
1471 candidates[(i)] = (w); \
1472 } while (0)
1473 for (wl = web->conflict_list; wl; wl = wl->next)
1475 struct web *w = wl->t;
1476 struct web *aw = alias (w);
1477 /* If we are a spill-temp, we also look at webs coalesced
1478 to precolored ones. Otherwise we only look at webs which
1479 themselves were colored, or coalesced to one. */
1480 if (aw->type == PRECOLORED && w != aw && web->spill_temp
1481 && flag_ra_optimistic_coalescing)
1483 if (!w->spill_temp)
1484 set_cand (4, w);
1485 else if (web->spill_temp == 2
1486 && w->spill_temp == 2
1487 && w->spill_cost < web->spill_cost)
1488 set_cand (5, w);
1489 else if (web->spill_temp != 2
1490 && (w->spill_temp == 2
1491 || w->spill_cost < web->spill_cost))
1492 set_cand (6, w);
1493 continue;
1495 if (aw->type != COLORED)
1496 continue;
1497 if (w->type == COLORED && !w->spill_temp && !w->is_coalesced
1498 && w->was_spilled)
1500 if (w->spill_cost < web->spill_cost)
1501 set_cand (0, w);
1502 else if (web->spill_temp)
1503 set_cand (1, w);
1505 if (w->type == COLORED && !w->spill_temp && !w->is_coalesced
1506 && !w->was_spilled)
1508 if (w->spill_cost < web->spill_cost)
1509 set_cand (2, w);
1510 else if (web->spill_temp && web->spill_temp != 2)
1511 set_cand (3, w);
1513 if (web->spill_temp)
1515 if (w->type == COLORED && w->spill_temp == 2
1516 && !w->is_coalesced
1517 && (w->spill_cost < web->spill_cost
1518 || web->spill_temp != 2))
1519 set_cand (4, w);
1520 if (!aw->spill_temp)
1521 set_cand (5, aw);
1522 if (aw->spill_temp == 2
1523 && (aw->spill_cost < web->spill_cost
1524 || web->spill_temp != 2))
1525 set_cand (6, aw);
1526 /* For boehm-gc/misc.c. If we are a difficult spilltemp,
1527 coalesced neighbors are also a chance, _even_ if they
1528 too are spilltemps. At least their coalescing can be
1529 broken up, which may reset usable_regs, and make
1530 it easier to color. */
1531 if (web->spill_temp != 2 && aw->is_coalesced
1532 && flag_ra_optimistic_coalescing)
1533 set_cand (7, aw);
1536 for (loop = 0; try == NULL && loop < 8; loop++)
1537 if (candidates[loop])
1538 try = candidates[loop];
1539 #undef set_cand
1540 if (try)
1542 int old_c = try->color;
1543 if (try->type == COALESCED)
1545 gcc_assert (alias (try)->type == PRECOLORED);
1546 ra_debug_msg (DUMP_COLORIZE, " breaking alias %d -> %d\n",
1547 try->id, alias (try)->id);
1548 break_precolored_alias (try);
1549 colorize_one_web (web, hard);
1551 else
1553 remove_list (try->dlink, &WEBS(COLORED));
1554 put_web (try, SPILLED);
1555 /* Now try to colorize us again. Can recursively make other
1556 webs also spill, until there are no more unspilled
1557 neighbors. */
1558 ra_debug_msg (DUMP_COLORIZE, " trying to spill %d\n", try->id);
1559 colorize_one_web (web, hard);
1560 if (web->type != COLORED)
1562 /* We tried recursively to spill all already colored
1563 neighbors, but we are still uncolorable. So it made
1564 no sense to spill those neighbors. Recolor them. */
1565 remove_list (try->dlink, &WEBS(SPILLED));
1566 put_web (try, COLORED);
1567 try->color = old_c;
1568 ra_debug_msg (DUMP_COLORIZE,
1569 " spilling %d was useless\n", try->id);
1571 else
1573 ra_debug_msg (DUMP_COLORIZE,
1574 " to spill %d was a good idea\n",
1575 try->id);
1576 remove_list (try->dlink, &WEBS(SPILLED));
1577 if (try->was_spilled)
1578 colorize_one_web (try, 0);
1579 else
1580 colorize_one_web (try, hard - 1);
1584 else
1585 /* No more chances to get a color, so give up hope and
1586 spill us. */
1587 put_web (web, SPILLED);
1589 else
1590 put_web (web, SPILLED);
1592 else
1594 put_web (web, COLORED);
1595 web->color = c;
1596 if (flag_ra_biased)
1598 int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
1599 for (wl = web->conflict_list; wl; wl = wl->next)
1601 struct web *ptarget = alias (wl->t);
1602 int i;
1603 for (i = 0; i < nregs; i++)
1604 SET_HARD_REG_BIT (ptarget->bias_colors, c + i);
1608 if (web->regno >= max_normal_pseudo && web->type == SPILLED)
1610 web->color = an_unusable_color;
1611 remove_list (web->dlink, &WEBS(SPILLED));
1612 put_web (web, COLORED);
1614 if (web->type == SPILLED && flag_ra_optimistic_coalescing
1615 && web->is_coalesced)
1617 ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id);
1618 restore_conflicts_from_coalesce (web);
1619 break_aliases_to_web (web);
1620 insert_coalesced_conflicts ();
1621 ra_debug_msg (DUMP_COLORIZE, "\n");
1622 remove_list (web->dlink, &WEBS(SPILLED));
1623 put_web (web, SELECT);
1624 web->color = -1;
1628 /* Assign the colors to all nodes on the select stack. And update the
1629 colors of coalesced webs. */
1631 static void
1632 assign_colors (void)
1634 struct dlist *d;
1636 while (WEBS(SELECT))
1638 d = pop_list (&WEBS(SELECT));
1639 colorize_one_web (DLIST_WEB (d), 1);
1642 for (d = WEBS(COALESCED); d; d = d->next)
1644 struct web *a = alias (DLIST_WEB (d));
1645 DLIST_WEB (d)->color = a->color;
1649 /* WEB is a spilled web. Look if we can improve the cost of the graph
1650 by coloring WEB, even if we then need to spill some of its neighbors.
1651 For this we calculate, for each color C, the cost that results when we
1652 _would_ give WEB color C (i.e. the cost of the then spilled neighbors).
1653 If the lowest cost among them is smaller than the spill cost of WEB, we
1654 do that recoloring, and instead spill the neighbors.
1656 This can sometimes help when, due to irregularities in the register file
1657 and due to multi-word pseudos, the colorization is suboptimal. But
1658 be aware that currently this pass is quite slow. */
1660 static void
1661 try_recolor_web (struct web *web)
1663 struct conflict_link *wl;
1664 unsigned HOST_WIDE_INT *cost_neighbors;
1665 unsigned int *min_color;
1666 int newcol, c;
1667 HARD_REG_SET precolored_neighbors, spill_temps;
1668 HARD_REG_SET possible_begin, wide_seen;
1669 cost_neighbors = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (cost_neighbors[0]));
1670 /* For each hard reg, count the number of preceding hardregs which
1671 would overlap this color if used in WEB's mode. */
1672 min_color = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (int));
1673 CLEAR_HARD_REG_SET (possible_begin);
1674 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
1676 int i, nregs;
1677 if (!HARD_REGNO_MODE_OK (c, GET_MODE (web->orig_x)))
1678 continue;
1679 nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
1680 for (i = 0; i < nregs; i++)
1681 if (!TEST_HARD_REG_BIT (web->usable_regs, c + i))
1682 break;
1683 if (i < nregs || nregs == 0)
1684 continue;
1685 SET_HARD_REG_BIT (possible_begin, c);
1686 for (; nregs--;)
1687 if (!min_color[c + nregs])
1688 min_color[c + nregs] = 1 + c;
1690 CLEAR_HARD_REG_SET (precolored_neighbors);
1691 CLEAR_HARD_REG_SET (spill_temps);
1692 CLEAR_HARD_REG_SET (wide_seen);
1693 for (wl = web->conflict_list; wl; wl = wl->next)
1695 HARD_REG_SET dont_begin;
1696 struct web *web2 = alias (wl->t);
1697 struct conflict_link *nn;
1698 int c1, c2;
1699 int wide_p = 0;
1700 if (wl->t->type == COALESCED || web2->type != COLORED)
1702 if (web2->type == PRECOLORED)
1704 c1 = min_color[web2->color];
1705 c1 = (c1 == 0) ? web2->color : (c1 - 1);
1706 c2 = web2->color;
1707 for (; c1 <= c2; c1++)
1708 SET_HARD_REG_BIT (precolored_neighbors, c1);
1710 continue;
1712 /* Mark colors for which some wide webs are involved. For
1713 those the independent sets are not simply one-node graphs, so
1714 they can't be recolored independently from their neighborhood. This
1715 means that our cost calculation can be incorrect (assuming it
1716 can avoid spilling a web because it thinks some colors are available,
1717 although its neighbors, which themselves need recoloring, might take
1718 away exactly those colors). */
1719 if (web2->add_hardregs)
1720 wide_p = 1;
1721 for (nn = web2->conflict_list; nn && !wide_p; nn = nn->next)
1722 if (alias (nn->t)->add_hardregs)
1723 wide_p = 1;
1724 calculate_dont_begin (web2, &dont_begin);
1725 c1 = min_color[web2->color];
1726 /* Note that min_color[] contains 1-based values (zero means
1727 undef). */
1728 c1 = c1 == 0 ? web2->color : (c1 - 1);
1729 c2 = web2->color + hard_regno_nregs[web2->color][GET_MODE
1730 (web2->orig_x)] - 1;
1731 for (; c1 <= c2; c1++)
1732 if (TEST_HARD_REG_BIT (possible_begin, c1))
1734 int nregs;
1735 HARD_REG_SET colors;
1736 nregs = hard_regno_nregs[c1][GET_MODE (web->orig_x)];
1737 COPY_HARD_REG_SET (colors, web2->usable_regs);
1738 for (; nregs--;)
1739 CLEAR_HARD_REG_BIT (colors, c1 + nregs);
1740 if (wide_p)
1741 SET_HARD_REG_BIT (wide_seen, c1);
1742 if (get_free_reg (dont_begin, colors,
1743 GET_MODE (web2->orig_x)) < 0)
1745 if (web2->spill_temp)
1746 SET_HARD_REG_BIT (spill_temps, c1);
1747 else
1748 cost_neighbors[c1] += web2->spill_cost;
1752 newcol = -1;
1753 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
1754 if (TEST_HARD_REG_BIT (possible_begin, c)
1755 && !TEST_HARD_REG_BIT (precolored_neighbors, c)
1756 && !TEST_HARD_REG_BIT (spill_temps, c)
1757 && (newcol == -1
1758 || cost_neighbors[c] < cost_neighbors[newcol]))
1759 newcol = c;
1760 if (newcol >= 0 && cost_neighbors[newcol] < web->spill_cost)
1762 int nregs = hard_regno_nregs[newcol][GET_MODE (web->orig_x)];
1763 unsigned HOST_WIDE_INT cost = 0;
1764 int *old_colors;
1765 struct conflict_link *wl_next;
1766 ra_debug_msg (DUMP_COLORIZE, "try to set web %d to color %d\n", web->id,
1767 newcol);
1768 remove_list (web->dlink, &WEBS(SPILLED));
1769 put_web (web, COLORED);
1770 web->color = newcol;
1771 old_colors = xcalloc (num_webs, sizeof (int));
1772 for (wl = web->conflict_list; wl; wl = wl_next)
1774 struct web *web2 = alias (wl->t);
1775 /* If web2 is a coalesce-target, and will become spilled
1776 below in colorize_one_web(), and the current conflict wl
1777 between web and web2 was only the result of that coalescing,
1778 this conflict will be deleted, making wl invalid. So save
1779 the next conflict right now. Note that if web2 is indeed in
1780 such a state, then wl->next cannot be deleted in this
1781 iteration. */
1782 wl_next = wl->next;
1783 if (web2->type == COLORED)
1785 int nregs2 = hard_regno_nregs[web2->color][GET_MODE
1786 (web2->orig_x)];
1787 if (web->color >= web2->color + nregs2
1788 || web2->color >= web->color + nregs)
1789 continue;
1790 old_colors[web2->id] = web2->color + 1;
1791 web2->color = -1;
1792 remove_list (web2->dlink, &WEBS(COLORED));
1793 web2->type = SELECT;
1794 /* Allow webs to be spilled. */
1795 if (web2->spill_temp == 0 || web2->spill_temp == 2)
1796 web2->was_spilled = 1;
1797 colorize_one_web (web2, 1);
1798 if (web2->type == SPILLED)
1799 cost += web2->spill_cost;
1802 /* The actual cost may be smaller than the guessed one, because
1803 partial conflicts could result in some conflicting webs getting
1804 a color, where we assumed they would have to be spilled. See the
1805 comment above about what happens when wide webs are involved, and why
1806 in that case there might actually be some webs spilled although they
1807 were thought to be colorable. */
1808 gcc_assert (cost <= cost_neighbors[newcol]
1809 || nregs != 1 || TEST_HARD_REG_BIT (wide_seen, newcol));
1810 /* But if the new spill cost is higher than our own, then we really
1811 lose. Respill us and recolor the neighbors as before. */
1812 if (cost > web->spill_cost)
1814 ra_debug_msg (DUMP_COLORIZE,
1815 "reset coloring of web %d, too expensive\n", web->id);
1816 remove_list (web->dlink, &WEBS(COLORED));
1817 web->color = -1;
1818 put_web (web, SPILLED);
1819 for (wl = web->conflict_list; wl; wl = wl->next)
1821 struct web *web2 = alias (wl->t);
1822 if (old_colors[web2->id])
1824 switch (web2->type)
1826 case SPILLED:
1827 remove_list (web2->dlink, &WEBS(SPILLED));
1828 web2->color = old_colors[web2->id] - 1;
1829 put_web (web2, COLORED);
1830 break;
1831 case COLORED:
1832 web2->color = old_colors[web2->id] - 1;
1833 break;
1834 case SELECT:
1835 /* This means that WEB2 once was a part of a coalesced
1836 web, which got spilled in the above colorize_one_web()
1837 call, and whose parts then got split and put back
1838 onto the SELECT stack. As the cause for that splitting
1839 (the coloring of WEB) was worthless, we should again
1840 coalesce the parts, as they were before. For now we
1841 simply leave them SELECTed, for our caller to take
1842 care of. */
1843 break;
1844 default:
1845 gcc_unreachable ();
1850 free (old_colors);
1852 free (min_color);
1853 free (cost_neighbors);
1856 /* This ensures that all conflicts of coalesced webs are seen from
1857 the webs they were coalesced into. combine() only adds the conflicts
1858 which at the time of combining were not already SELECTed or COALESCED,
1859 so as not to destroy num_conflicts. Here we add all remaining conflicts
1860 and thereby destroy num_conflicts. This should be used when num_conflicts
1861 isn't used anymore, e.g. on a completely colored graph. */
1863 static void
1864 insert_coalesced_conflicts (void)
1866 struct dlist *d;
1867 for (d = WEBS(COALESCED); 0 && d; d = d->next)
1869 struct web *web = DLIST_WEB (d);
1870 struct web *aweb = alias (web);
1871 struct conflict_link *wl;
1872 for (wl = web->conflict_list; wl; wl = wl->next)
1874 struct web *tweb = aweb;
1875 int i;
1876 int nregs = 1 + web->add_hardregs;
1877 if (aweb->type == PRECOLORED)
1878 nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
1879 for (i = 0; i < nregs; i++)
1881 if (aweb->type == PRECOLORED)
1882 tweb = hardreg2web[i + aweb->color];
1883 /* There might be some conflict edges lying around
1884 where the usable_regs don't intersect. This can happen
1885 when first some webs were coalesced and conflicts
1886 propagated, then some combining narrowed usable_regs and
1887 further coalescing ignored those conflicts. Now there are
1888 some edges to COALESCED webs but not to their aliases.
1889 So assert they really do not conflict. */
1890 gcc_assert (((tweb->type == PRECOLORED
1891 || TEST_BIT (sup_igraph,
1892 tweb->id * num_webs + wl->t->id))
1893 && (wl->t->type == PRECOLORED
1894 || TEST_BIT (sup_igraph,
1895 wl->t->id * num_webs + tweb->id)))
1896 || !hard_regs_intersect_p (&tweb->usable_regs,
1897 &wl->t->usable_regs));
1898 /*if (wl->sub == NULL)
1899 record_conflict (tweb, wl->t);
1900 else
1902 struct sub_conflict *sl;
1903 for (sl = wl->sub; sl; sl = sl->next)
1904 record_conflict (tweb, sl->t);
1905 }*/
1906 if (aweb->type != PRECOLORED)
1907 break;
1913 /* A function suitable to pass to qsort(). Compare the spill costs
1914 of webs W1 and W2. When used by qsort, this would order webs with
1915 largest cost first. */
1917 static int
1918 comp_webs_maxcost (const void *w1, const void *w2)
1920 struct web *web1 = *(struct web **)w1;
1921 struct web *web2 = *(struct web **)w2;
1922 if (web1->spill_cost > web2->spill_cost)
1923 return -1;
1924 else if (web1->spill_cost < web2->spill_cost)
1925 return 1;
1926 else
1927 return 0;
1930 /* This tries to recolor all spilled webs. See try_recolor_web()
1931 for how this is done. This just calls it for each spilled web. */
1933 static void
1934 recolor_spills (void)
1936 unsigned int i, num;
1937 struct web **order2web;
1938 num = num_webs - num_subwebs;
1939 order2web = xmalloc (num * sizeof (order2web[0]));
1940 for (i = 0; i < num; i++)
1942 order2web[i] = id2web[i];
1943 /* If we aren't breaking aliases, combine() wasn't merging the
1944 spill_costs. So do that here to have sane measures. */
1945 if (!flag_ra_merge_spill_costs && id2web[i]->type == COALESCED)
1946 alias (id2web[i])->spill_cost += id2web[i]->spill_cost;
1948 qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost);
1949 insert_coalesced_conflicts ();
1950 dump_graph_cost (DUMP_COSTS, "before spill-recolor");
1951 for (i = 0; i < num; i++)
1953 struct web *web = order2web[i];
1954 if (web->type == SPILLED)
1955 try_recolor_web (web);
1957 /* It might have been decided in try_recolor_web() (in colorize_one_web())
1958 that a coalesced web should be spilled, so it was put on the
1959 select stack. Those webs need recoloring again, and all remaining
1960 coalesced webs might need their color updated, so simply call
1961 assign_colors() again. */
1962 assign_colors ();
1963 free (order2web);
1966 /* This checks the current color assignment for obvious errors,
1967 like two conflicting webs overlapping in colors, or the used colors
1968 not being in usable regs. */
1970 static void
1971 check_colors (void)
1973 unsigned int i;
1974 for (i = 0; i < num_webs - num_subwebs; i++)
1976 struct web *web = id2web[i];
1977 struct web *aweb = alias (web);
1978 struct conflict_link *wl;
1979 int nregs;
1981 if (web->regno >= max_normal_pseudo)
1982 continue;
1984 switch (aweb->type)
1986 case SPILLED:
1987 continue;
1989 case COLORED:
1990 nregs = hard_regno_nregs[aweb->color][GET_MODE (web->orig_x)];
1991 break;
1993 case PRECOLORED:
1994 nregs = 1;
1995 break;
1997 default:
1998 gcc_unreachable ();
2001 #ifdef ENABLE_CHECKING
2002 /* The color must be valid for the original usable_regs. */
2004 int c;
2005 for (c = 0; c < nregs; c++)
2006 gcc_assert (TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c));
2008 #endif
2009 /* Search the original (pre-coalesce) conflict list. In the current
2010 one some imprecise conflicts may be noted (due to combine() or
2011 insert_coalesced_conflicts() relocating partial conflicts), making
2012 it look like some wide webs are in conflict while having the same
2013 color. */
2014 wl = (web->have_orig_conflicts ? web->orig_conflict_list
2015 : web->conflict_list);
2016 for (; wl; wl = wl->next)
2017 if (wl->t->regno >= max_normal_pseudo)
2018 continue;
2019 else if (!wl->sub)
2021 struct web *web2 = alias (wl->t);
2022 int nregs2;
2023 if (web2->type == COLORED)
2024 nregs2 = hard_regno_nregs[web2->color][GET_MODE (web2->orig_x)];
2025 else if (web2->type == PRECOLORED)
2026 nregs2 = 1;
2027 else
2028 continue;
2029 gcc_assert (aweb->color >= web2->color + nregs2
2030 || web2->color >= aweb->color + nregs);
2031 continue;
2033 else
2035 struct sub_conflict *sl;
2036 int scol = aweb->color;
2037 int tcol = alias (wl->t)->color;
2038 if (alias (wl->t)->type == SPILLED)
2039 continue;
2040 for (sl = wl->sub; sl; sl = sl->next)
2042 int ssize = hard_regno_nregs[scol][GET_MODE (sl->s->orig_x)];
2043 int tsize = hard_regno_nregs[tcol][GET_MODE (sl->t->orig_x)];
2044 int sofs = 0, tofs = 0;
2045 if (SUBWEB_P (sl->t)
2046 && GET_MODE_SIZE (GET_MODE (sl->t->orig_x)) >= UNITS_PER_WORD)
2047 tofs = (SUBREG_BYTE (sl->t->orig_x) / UNITS_PER_WORD);
2048 if (SUBWEB_P (sl->s)
2049 && GET_MODE_SIZE (GET_MODE (sl->s->orig_x))
2050 >= UNITS_PER_WORD)
2051 sofs = (SUBREG_BYTE (sl->s->orig_x) / UNITS_PER_WORD);
2052 gcc_assert ((tcol + tofs >= scol + sofs + ssize)
2053 || (scol + sofs >= tcol + tofs + tsize));
2054 continue;
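/* The sketch below is illustrative only (guarded by #if 0; the helper
   hardreg_ranges_disjoint is hypothetical and not used anywhere in the
   allocator).  It restates the overlap test performed in check_colors()
   above: a colored web occupies the half-open hard register range
   [color, color + nregs), and two conflicting webs may share no register.  */
#if 0
static int
hardreg_ranges_disjoint (int color1, int nregs1, int color2, int nregs2)
{
  /* The ranges [color1, color1 + nregs1) and [color2, color2 + nregs2)
     are disjoint exactly when one begins at or after the end of the
     other -- the same condition asserted above via gcc_assert.  */
  return color1 >= color2 + nregs2 || color2 >= color1 + nregs1;
}
#endif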
2060 /* WEB was a coalesced web. Make it unaliased again, and put it
2061 back onto SELECT stack. */
2063 static void
2064 unalias_web (struct web *web)
2066 web->alias = NULL;
2067 web->is_coalesced = 0;
2068 web->color = -1;
2069 /* Well, initially everything was spilled, so it isn't incorrect
2070 that the individual parts can also be spilled.
2071 XXX This isn't entirely correct, as we also relaxed the
2072 spill_temp flag in combine(), which might have made components
2073 spill although they were short or spill-temporary webs. */
2074 web->was_spilled = 1;
2075 remove_list (web->dlink, &WEBS(COALESCED));
2076 /* Spilltemps must be colored right now (i.e. as early as possible);
2077 other webs can be deferred to the end (the code building the
2078 stack assumed that in this stage only one web was colored). */
2079 if (web->spill_temp && web->spill_temp != 2)
2080 put_web (web, SELECT);
2081 else
2082 put_web_at_end (web, SELECT);
2085 /* WEB is a _target_ for coalescing which got spilled.
2086 Break all aliases to WEB, and restore some of its members to the state
2087 they were in before coalescing. Due to the suboptimal structure of
2088 the interference graph we need to go through all coalesced webs.
2089 At some point we'll change this to be more sane. */
2091 static void
2092 break_aliases_to_web (struct web *web)
2094 struct dlist *d, *d_next;
2095 gcc_assert (web->type == SPILLED);
2096 for (d = WEBS(COALESCED); d; d = d_next)
2098 struct web *other = DLIST_WEB (d);
2099 d_next = d->next;
2100 /* Beware: Don't use alias() here. We really want to check only
2101 one level of aliasing, i.e. only break up webs directly
2102 aliased to WEB, not also those aliased through other webs. */
2103 if (other->alias == web)
2105 unalias_web (other);
2106 ra_debug_msg (DUMP_COLORIZE, " %d", other->id);
2109 web->spill_temp = web->orig_spill_temp;
2110 web->spill_cost = web->orig_spill_cost;
2111 /* Beware: The following possibly widens usable_regs again. While
2112 it was narrower there might have been some conflicts added which got
2113 ignored because of non-intersecting hardregsets. All those conflicts
2114 would now matter again. Fortunately we only add conflicts when
2115 coalescing, which is also the time of narrowing. And we remove all
2116 those added conflicts again now that we unalias this web.
2117 Therefore this is safe to do. */
2118 COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
2119 web->is_coalesced = 0;
2120 web->num_aliased = 0;
2121 web->was_spilled = 1;
2122 /* Reset the is_coalesced flag for webs which themselves are targets of
2123 coalescing. It was cleared above if they were coalesced to WEB. */
2124 for (d = WEBS(COALESCED); d; d = d->next)
2125 DLIST_WEB (d)->alias->is_coalesced = 1;
2128 /* WEB is a web coalesced into a precolored one. Break that alias,
2129 making WEB SELECTed again. Also restores the conflicts which resulted
2130 from initially coalescing both. */
2132 static void
2133 break_precolored_alias (struct web *web)
2135 struct web *pre = web->alias;
2136 struct conflict_link *wl;
2137 unsigned int c = pre->color;
2138 unsigned int nregs = hard_regno_nregs[c][GET_MODE (web->orig_x)];
2139 gcc_assert (pre->type == PRECOLORED);
2140 unalias_web (web);
2141 /* Now we need to look at each conflict X of WEB, check if it conflicts
2142 with [PRE, PRE+nregs), and remove such conflicts if X has no other
2143 conflicting webs which are coalesced into those precolored webs. */
2144 for (wl = web->conflict_list; wl; wl = wl->next)
2146 struct web *x = wl->t;
2147 struct web *y;
2148 unsigned int i;
2149 struct conflict_link *wl2;
2150 struct conflict_link **pcl;
2151 HARD_REG_SET regs;
2152 if (!x->have_orig_conflicts)
2153 continue;
2154 /* First look at which colors cannot go away, due to other coalesces
2155 still existing. */
2156 CLEAR_HARD_REG_SET (regs);
2157 for (i = 0; i < nregs; i++)
2158 SET_HARD_REG_BIT (regs, c + i);
2159 for (wl2 = x->conflict_list; wl2; wl2 = wl2->next)
2160 if (wl2->t->type == COALESCED && alias (wl2->t)->type == PRECOLORED)
2161 CLEAR_HARD_REG_BIT (regs, alias (wl2->t)->color);
2162 /* Now also remove the colors of those conflicts which already
2163 were there before coalescing at all. */
2164 for (wl2 = x->orig_conflict_list; wl2; wl2 = wl2->next)
2165 if (wl2->t->type == PRECOLORED)
2166 CLEAR_HARD_REG_BIT (regs, wl2->t->color);
2167 /* The colors now still set are those for which WEB was the last
2168 cause, i.e. those which can be removed. */
2169 y = NULL;
2170 for (i = 0; i < nregs; i++)
2171 if (TEST_HARD_REG_BIT (regs, c + i))
2173 struct web *sub;
2174 y = hardreg2web[c + i];
2175 RESET_BIT (sup_igraph, x->id * num_webs + y->id);
2176 RESET_BIT (sup_igraph, y->id * num_webs + x->id);
2177 RESET_BIT (igraph, igraph_index (x->id, y->id));
2178 for (sub = x->subreg_next; sub; sub = sub->subreg_next)
2179 RESET_BIT (igraph, igraph_index (sub->id, y->id));
2181 if (!y)
2182 continue;
2183 pcl = &(x->conflict_list);
2184 while (*pcl)
2186 struct web *y = (*pcl)->t;
2187 if (y->type != PRECOLORED || !TEST_HARD_REG_BIT (regs, y->color))
2188 pcl = &((*pcl)->next);
2189 else
2190 *pcl = (*pcl)->next;
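/* A note on the bitmap conventions visible above and in
   restore_conflicts_from_coalesce() below: sup_igraph is indexed
   directionally as id1 * num_webs + id2, which is why an edge is reset
   in both directions, whereas igraph_index() maps the unordered pair to
   a single bit, so one RESET_BIT per (sub)web pair suffices there.  */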
2195 /* WEB is a spilled web which was target for coalescing.
2196 Delete all interference edges which were added due to that coalescing,
2197 and break up the coalescing. */
2199 static void
2200 restore_conflicts_from_coalesce (struct web *web)
2202 struct conflict_link **pcl;
2203 struct conflict_link *wl;
2204 pcl = &(web->conflict_list);
2205 /* No original conflict list means no conflict was added at all
2206 after building the graph. So neither we nor any neighbors have
2207 conflicts due to this coalescing. */
2208 if (!web->have_orig_conflicts)
2209 return;
2210 while (*pcl)
2212 struct web *other = (*pcl)->t;
2213 for (wl = web->orig_conflict_list; wl; wl = wl->next)
2214 if (wl->t == other)
2215 break;
2216 if (wl)
2218 /* We found this conflict also in the original list, so this
2219 was no new conflict. */
2220 pcl = &((*pcl)->next);
2222 else
2224 /* This is a new conflict, so delete it from us and
2225 the neighbor. */
2226 struct conflict_link **opcl;
2227 struct conflict_link *owl;
2228 struct sub_conflict *sl;
2229 wl = *pcl;
2230 *pcl = wl->next;
2231 gcc_assert (other->have_orig_conflicts
2232 || other->type == PRECOLORED);
2233 for (owl = other->orig_conflict_list; owl; owl = owl->next)
2234 if (owl->t == web)
2235 break;
2236 gcc_assert (!owl);
2237 opcl = &(other->conflict_list);
2238 while (*opcl)
2240 if ((*opcl)->t == web)
2242 owl = *opcl;
2243 *opcl = owl->next;
2244 break;
2246 else
2248 opcl = &((*opcl)->next);
2251 gcc_assert (owl || other->type == PRECOLORED);
2252 /* wl and owl contain the edge data to be deleted. */
2253 RESET_BIT (sup_igraph, web->id * num_webs + other->id);
2254 RESET_BIT (sup_igraph, other->id * num_webs + web->id);
2255 RESET_BIT (igraph, igraph_index (web->id, other->id));
2256 for (sl = wl->sub; sl; sl = sl->next)
2257 RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2258 if (other->type != PRECOLORED)
2260 for (sl = owl->sub; sl; sl = sl->next)
2261 RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2266 /* We must restore usable_regs because record_conflict will use it. */
2267 COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
2268 /* We might have deleted some conflicts above which really are still
2269 there (diamond pattern coalescing). This is because we don't
2270 reference-count interference edges, but some of them were the result
2271 of different coalesces. */
2272 for (wl = web->conflict_list; wl; wl = wl->next)
2273 if (wl->t->type == COALESCED)
2275 struct web *tweb;
2276 for (tweb = wl->t->alias; tweb; tweb = tweb->alias)
2278 if (wl->sub == NULL)
2279 record_conflict (web, tweb);
2280 else
2282 struct sub_conflict *sl;
2283 for (sl = wl->sub; sl; sl = sl->next)
2285 struct web *sweb = NULL;
2286 if (SUBWEB_P (sl->t))
2287 sweb = find_subweb (tweb, sl->t->orig_x);
2288 if (!sweb)
2289 sweb = tweb;
2290 record_conflict (sl->s, sweb);
2293 if (tweb->type != COALESCED)
2294 break;
2299 /* Repeatedly break aliases for spilled webs which were targets for
2300 coalescing, and recolor the resulting parts. Do this as long as
2301 there are any spilled coalesce targets. */
2303 static void
2304 break_coalesced_spills (void)
2306 int changed = 0;
2307 while (1)
2309 struct dlist *d;
2310 struct web *web;
2311 for (d = WEBS(SPILLED); d; d = d->next)
2312 if (DLIST_WEB (d)->is_coalesced)
2313 break;
2314 if (!d)
2315 break;
2316 changed = 1;
2317 web = DLIST_WEB (d);
2318 ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id);
2319 restore_conflicts_from_coalesce (web);
2320 break_aliases_to_web (web);
2321 /* WEB was a spilled web and isn't anymore. Everything coalesced
2322 to WEB is now SELECTed and might potentially get a color.
2323 If those other webs were themselves targets of coalescing, it might be
2324 that some conflicts from aliased webs are still missing,
2325 because they were added in combine() right into the now
2326 SELECTed web. So we need to add those missing conflicts here. */
2327 insert_coalesced_conflicts ();
2328 ra_debug_msg (DUMP_COLORIZE, "\n");
2329 remove_list (d, &WEBS(SPILLED));
2330 put_web (web, SELECT);
2331 web->color = -1;
2332 while (WEBS(SELECT))
2334 d = pop_list (&WEBS(SELECT));
2335 colorize_one_web (DLIST_WEB (d), 1);
2338 if (changed)
2340 struct dlist *d;
2341 for (d = WEBS(COALESCED); d; d = d->next)
2343 struct web *a = alias (DLIST_WEB (d));
2344 DLIST_WEB (d)->color = a->color;
2347 dump_graph_cost (DUMP_COSTS, "after alias-breaking");
2350 /* A structure for fast hashing of a pair of webs.
2351 Used to cumulate savings (from removing copy insns) for coalesced webs.
2352 All the pairs are also put into a single linked list. */
2353 struct web_pair
2355 struct web_pair *next_hash;
2356 struct web_pair *next_list;
2357 struct web *smaller;
2358 struct web *larger;
2359 unsigned int conflicts;
2360 unsigned HOST_WIDE_INT cost;
2363 /* The actual hash table. */
2364 #define WEB_PAIR_HASH_SIZE 8192
2365 static struct web_pair *web_pair_hash[WEB_PAIR_HASH_SIZE];
2366 static struct web_pair *web_pair_list;
2367 static unsigned int num_web_pairs;
2369 /* Clear the hash table of web pairs. */
2371 static void
2372 init_web_pairs (void)
2374 memset (web_pair_hash, 0, sizeof web_pair_hash);
2375 num_web_pairs = 0;
2376 web_pair_list = NULL;
2379 /* Given two webs connected by a move with cost COST which together
2380 have CONFLICTS conflicts, add that pair to the hash table, or, if it
2381 is already there, accumulate the costs and conflict numbers. */
2383 static void
2384 add_web_pair_cost (struct web *web1, struct web *web2,
2385 unsigned HOST_WIDE_INT cost, unsigned int conflicts)
2387 unsigned int hash;
2388 struct web_pair *p;
2389 if (web1->id > web2->id)
2391 struct web *h = web1;
2392 web1 = web2;
2393 web2 = h;
2395 hash = (web1->id * num_webs + web2->id) % WEB_PAIR_HASH_SIZE;
2396 for (p = web_pair_hash[hash]; p; p = p->next_hash)
2397 if (p->smaller == web1 && p->larger == web2)
2399 p->cost += cost;
2400 p->conflicts += conflicts;
2401 return;
2403 p = ra_alloc (sizeof *p);
2404 p->next_hash = web_pair_hash[hash];
2405 p->next_list = web_pair_list;
2406 p->smaller = web1;
2407 p->larger = web2;
2408 p->conflicts = conflicts;
2409 p->cost = cost;
2410 web_pair_hash[hash] = p;
2411 web_pair_list = p;
2412 num_web_pairs++;
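/* A minimal usage sketch, disabled and purely illustrative (the helper
   name and the frequency/conflict values are made up): because
   add_web_pair_cost() orders each pair by web id before hashing, both
   call orders below land on the same hash entry, and the costs and
   conflict counts simply accumulate.  */
#if 0
static void
web_pair_usage_sketch (struct web *wa, struct web *wb)
{
  init_web_pairs ();
  add_web_pair_cost (wa, wb, 4, 2);
  add_web_pair_cost (wb, wa, 4, 1);
  /* Now num_web_pairs == 1 and the single recorded pair has
     cost == 8 and conflicts == 3.  */
}
#endif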
2415 /* Suitable to be passed to qsort(). Sort web pairs so that those
2416 with more conflicts and higher cost (which actually is a saving
2417 when the moves are removed) come first. */
2419 static int
2420 comp_web_pairs (const void *w1, const void *w2)
2422 struct web_pair *p1 = *(struct web_pair **)w1;
2423 struct web_pair *p2 = *(struct web_pair **)w2;
2424 if (p1->conflicts > p2->conflicts)
2425 return -1;
2426 else if (p1->conflicts < p2->conflicts)
2427 return 1;
2428 else if (p1->cost > p2->cost)
2429 return -1;
2430 else if (p1->cost < p2->cost)
2431 return 1;
2432 else
2433 return 0;
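/* For example (hypothetical values): pairs with (conflicts, cost) of
   (5, 10), (5, 40) and (3, 100) are ordered (5, 40), (5, 10), (3, 100)
   by qsort with the comparator above -- more conflicts first, higher
   cost as the tie-breaker.  */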
2436 /* Given the list of web pairs, begin to combine them from the one
2437 with the most savings. */
2439 static void
2440 sort_and_combine_web_pairs (int for_move)
2442 unsigned int i;
2443 struct web_pair **sorted;
2444 struct web_pair *p;
2445 if (!num_web_pairs)
2446 return;
2447 sorted = xmalloc (num_web_pairs * sizeof (sorted[0]));
2448 for (p = web_pair_list, i = 0; p; p = p->next_list)
2449 sorted[i++] = p;
2450 gcc_assert (i == num_web_pairs);
2451 qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs);
2453 /* After combining one pair, we actually should adjust the savings
2454 of the other pairs if they are connected to one of the just-coalesced
2455 pair. Later. */
2456 for (i = 0; i < num_web_pairs; i++)
2458 struct web *w1, *w2;
2459 p = sorted[i];
2460 w1 = alias (p->smaller);
2461 w2 = alias (p->larger);
2462 if (!for_move && (w1->type == PRECOLORED || w2->type == PRECOLORED))
2463 continue;
2464 else if (w2->type == PRECOLORED)
2466 struct web *h = w1;
2467 w1 = w2;
2468 w2 = h;
2470 if (w1 != w2
2471 && !TEST_BIT (sup_igraph, w1->id * num_webs + w2->id)
2472 && !TEST_BIT (sup_igraph, w2->id * num_webs + w1->id)
2473 && w2->type != PRECOLORED
2474 && hard_regs_intersect_p (&w1->usable_regs, &w2->usable_regs))
2476 if (w1->type != PRECOLORED
2477 || (w1->type == PRECOLORED && ok (w2, w1)))
2478 combine (w1, w2);
2479 else if (w1->type == PRECOLORED)
2480 SET_HARD_REG_BIT (w2->prefer_colors, w1->color);
2483 free (sorted);
2486 /* Returns nonzero if source/target reg classes are ok for coalesce. */
2488 static int
2489 ok_class (struct web *target, struct web *source)
2491 /* Don't coalesce if preferred classes are different and at least one
2492 of them has a size of 1. This was preventing things such as the
2493 branch on count transformation (i.e. DoLoop) since the target, which
2494 prefers the CTR, was being coalesced with a source which preferred
2495 GENERAL_REGS. If only one web has a preferred class with 1 free reg
2496 then set it as the preferred color of the other web. */
2497 enum reg_class t_class, s_class;
2498 t_class = reg_preferred_class (target->regno);
2499 s_class = reg_preferred_class (source->regno);
2500 if (t_class != s_class)
2502 if (num_free_regs[t_class] == 1)
2504 if (num_free_regs[s_class] != 1)
2505 SET_HARD_REG_BIT (source->prefer_colors,
2506 single_reg_in_regclass[t_class]);
2507 return 0;
2509 else if (num_free_regs[s_class] == 1)
2511 SET_HARD_REG_BIT (target->prefer_colors,
2512 single_reg_in_regclass[s_class]);
2513 return 0;
2516 return 1;
2519 /* Greedily coalesce all moves possible. Begin with the web pair
2520 giving the most saving if coalesced. */
2522 static void
2523 aggressive_coalesce (void)
2525 struct dlist *d;
2526 struct move *m;
2527 init_web_pairs ();
2528 while ((d = pop_list (&mv_worklist)) != NULL)
2529 if ((m = DLIST_MOVE (d)))
2531 struct web *s = alias (m->source_web);
2532 struct web *t = alias (m->target_web);
2533 if (t->type == PRECOLORED)
2535 struct web *h = s;
2536 s = t;
2537 t = h;
2539 if (s != t
2540 && t->type != PRECOLORED
2541 && !TEST_BIT (sup_igraph, s->id * num_webs + t->id)
2542 && !TEST_BIT (sup_igraph, t->id * num_webs + s->id)
2543 && ok_class (t, s))
2545 if ((s->type == PRECOLORED && ok (t, s))
2546 || s->type != PRECOLORED)
2548 put_move (m, MV_COALESCED);
2549 add_web_pair_cost (s, t, BLOCK_FOR_INSN (m->insn)->frequency,
2552 else if (s->type == PRECOLORED)
2553 /* It is !ok(t, s). But later when coloring the graph it might
2554 be possible to take that color. So we remember the preferred
2555 color to try that first. */
2557 put_move (m, CONSTRAINED);
2558 SET_HARD_REG_BIT (t->prefer_colors, s->color);
2561 else
2563 put_move (m, CONSTRAINED);
2566 sort_and_combine_web_pairs (1);
2569 /* This is the difference between optimistic coalescing and
2570 optimistic coalescing+. Extended coalescing also tries to coalesce
2571 non-conflicting nodes not related by a move. The criterion here is that
2572 one web must be a source and the other a destination of the same insn.
2573 This actually makes sense, as (because they are in the same insn) they
2574 share many of their neighbors, and coalescing them reduces the
2575 number of conflicts of those neighbors by one. For this we again sort
2576 the candidate pairs according to savings (and this time also conflict
2577 number).
2579 This is also a comparatively slow operation, as we need to go through
2580 all insns, and for each insn, through all defs and uses. */
2582 static void
2583 extended_coalesce_2 (void)
2585 rtx insn;
2586 struct ra_insn_info info;
2587 unsigned int n;
2588 init_web_pairs ();
2589 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2590 if (INSN_P (insn) && (info = insn_df[INSN_UID (insn)]).num_defs)
2591 for (n = 0; n < info.num_defs; n++)
2593 struct web *dest = def2web[DF_REF_ID (info.defs[n])];
2594 dest = alias (find_web_for_subweb (dest));
2595 if (dest->type != PRECOLORED && dest->regno < max_normal_pseudo)
2597 unsigned int n2;
2598 for (n2 = 0; n2 < info.num_uses; n2++)
2600 struct web *source = use2web[DF_REF_ID (info.uses[n2])];
2601 source = alias (find_web_for_subweb (source));
2602 if (source->type != PRECOLORED
2603 && source != dest
2604 && source->regno < max_normal_pseudo
2605 /* Coalesced webs end up using the same REG rtx in
2606 emit_colors(). So we can only coalesce webs
2607 of equal mode. */
2608 && GET_MODE (source->orig_x) == GET_MODE (dest->orig_x)
2609 && !TEST_BIT (sup_igraph,
2610 dest->id * num_webs + source->id)
2611 && !TEST_BIT (sup_igraph,
2612 source->id * num_webs + dest->id)
2613 && ok_class (dest, source)
2614 && hard_regs_intersect_p (&source->usable_regs,
2615 &dest->usable_regs))
2616 add_web_pair_cost (dest, source,
2617 BLOCK_FOR_INSN (insn)->frequency,
2618 dest->num_conflicts
2619 + source->num_conflicts);
2623 sort_and_combine_web_pairs (0);
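/* An illustrative example (hypothetical insn): for
     (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
   extended_coalesce_2() above pairs the def web of reg 100 with the use
   webs of regs 101 and 102, and records each non-conflicting,
   equal-mode, class-compatible pair via add_web_pair_cost().  */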
2626 /* Check if we forgot to coalesce some moves. */
2628 static void
2629 check_uncoalesced_moves (void)
2631 struct move_list *ml;
2632 struct move *m;
2633 for (ml = wl_moves; ml; ml = ml->next)
2634 if ((m = ml->move))
2636 struct web *s = alias (m->source_web);
2637 struct web *t = alias (m->target_web);
2638 if (t->type == PRECOLORED)
2640 struct web *h = s;
2641 s = t;
2642 t = h;
2644 gcc_assert (s == t
2645 || m->type == CONSTRAINED
2646 /* The following can happen when a move was coalesced, but
2647 later broken up again. Then s != t, but m is still
2648 MV_COALESCED. */
2649 || m->type == MV_COALESCED
2650 || t->type == PRECOLORED
2651 || (s->type == PRECOLORED && !ok (t, s))
2652 || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
2653 || TEST_BIT (sup_igraph, t->id * num_webs + s->id));
2657 /* The toplevel function in this file. The precondition is that
2658 the interference graph has been completely built by ra-build.c. This
2659 produces a list of spilled, colored and coalesced nodes. */
2661 void
2662 ra_colorize_graph (struct df *df)
2664 if (dump_file)
2665 dump_igraph (df);
2666 build_worklists (df);
2668 /* With optimistic coalescing we coalesce everything we can. */
2669 if (flag_ra_optimistic_coalescing)
2671 aggressive_coalesce ();
2672 extended_coalesce_2 ();
2675 /* Now build the select stack. */
2676 do
2678 simplify ();
2679 if (mv_worklist)
2680 coalesce ();
2681 else if (WEBS(FREEZE))
2682 freeze ();
2683 else if (WEBS(SPILL))
2684 select_spill ();
2686 while (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_FAT) || WEBS(SIMPLIFY_SPILL)
2687 || mv_worklist || WEBS(FREEZE) || WEBS(SPILL));
2688 if (flag_ra_optimistic_coalescing)
2689 check_uncoalesced_moves ();
2691 /* Actually colorize the webs from the select stack. */
2692 assign_colors ();
2693 check_colors ();
2694 dump_graph_cost (DUMP_COSTS, "initially");
2695 if (flag_ra_break_aliases)
2696 break_coalesced_spills ();
2697 check_colors ();
2699 /* And try to improve the cost by recoloring spilled webs. */
2700 recolor_spills ();
2701 dump_graph_cost (DUMP_COSTS, "after spill-recolor");
2702 check_colors ();
2705 /* Initialize this module. */
2707 void ra_colorize_init (void)
2709 /* FIXME: Choose spill heuristic for platform if we have one */
2710 spill_heuristic = default_spill_heuristic;
2713 /* Free all memory. (Note that we don't need to free any per-pass
2714 memory.) */
2716 void
2717 ra_colorize_free_all (void)
2719 struct dlist *d;
2720 while ((d = pop_list (&WEBS(FREE))) != NULL)
2721 put_web (DLIST_WEB (d), INITIAL);
2722 while ((d = pop_list (&WEBS(INITIAL))) != NULL)
2724 struct web *web = DLIST_WEB (d);
2725 struct web *wnext;
2726 web->orig_conflict_list = NULL;
2727 web->conflict_list = NULL;
2728 for (web = web->subreg_next; web; web = wnext)
2730 wnext = web->subreg_next;
2731 free (web);
2733 free (DLIST_WEB (d));
2738 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: