[official-gcc.git] / gcc / ra-rewrite.c
blob 5cb1bb95522ada5fc04aa58b0e5f41125d7d117f
1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "function.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "basic-block.h"
31 #include "df.h"
32 #include "expr.h"
33 #include "output.h"
34 #include "except.h"
35 #include "ra.h"
36 #include "insn-config.h"
37 #include "reload.h"
39 /* This file is part of the graph coloring register allocator, and
40 contains the functions to change the insn stream. I.e. it adds
41 spill code, rewrites insns to use the new registers after
42 coloring and deletes coalesced moves. */
44 struct rewrite_info;
45 struct rtx_list;
47 static void spill_coalescing PARAMS ((sbitmap, sbitmap));
48 static unsigned HOST_WIDE_INT spill_prop_savings PARAMS ((struct web *,
49 sbitmap));
50 static void spill_prop_insert PARAMS ((struct web *, sbitmap, sbitmap));
51 static int spill_propagation PARAMS ((sbitmap, sbitmap, sbitmap));
52 static void spill_coalprop PARAMS ((void));
53 static void allocate_spill_web PARAMS ((struct web *));
54 static void choose_spill_colors PARAMS ((void));
55 static void rewrite_program PARAMS ((bitmap));
56 static void remember_slot PARAMS ((struct rtx_list **, rtx));
57 static int slots_overlap_p PARAMS ((rtx, rtx));
58 static void delete_overlapping_slots PARAMS ((struct rtx_list **, rtx));
59 static int slot_member_p PARAMS ((struct rtx_list *, rtx));
60 static void insert_stores PARAMS ((bitmap));
61 static int spill_same_color_p PARAMS ((struct web *, struct web *));
62 static bool is_partly_live_1 PARAMS ((sbitmap, struct web *));
63 static void update_spill_colors PARAMS ((HARD_REG_SET *, struct web *, int));
64 static int spill_is_free PARAMS ((HARD_REG_SET *, struct web *));
65 static void emit_loads PARAMS ((struct rewrite_info *, int, rtx));
66 static void reloads_to_loads PARAMS ((struct rewrite_info *, struct ref **,
67 unsigned int, struct web **));
68 static void rewrite_program2 PARAMS ((bitmap));
69 static void mark_refs_for_checking PARAMS ((struct web *, bitmap));
70 static void detect_web_parts_to_rebuild PARAMS ((void));
71 static void delete_useless_defs PARAMS ((void));
72 static void detect_non_changed_webs PARAMS ((void));
73 static void reset_changed_flag PARAMS ((void));
75 /* For tracking some statistics, we count the number (and cost)
76 of deleted move insns. */
77 static unsigned int deleted_move_insns;
78 static unsigned HOST_WIDE_INT deleted_move_cost;
80 /* This is the spill coalescing phase. In SPILLED the IDs of all
81 already spilled webs are noted; in COALESCE are the IDs of webs still
82 to check for coalescing. This tries to coalesce two webs which were
83 spilled, are connected by a move, and don't conflict. Greatly
84 reduces memory shuffling. */
86 static void
87 spill_coalescing (coalesce, spilled)
88 sbitmap coalesce, spilled;
90 struct move_list *ml;
91 struct move *m;
92 for (ml = wl_moves; ml; ml = ml->next)
93 if ((m = ml->move) != NULL)
95 struct web *s = alias (m->source_web);
96 struct web *t = alias (m->target_web);
97 if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id))
98 || (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id)))
100 struct conflict_link *wl;
101 if (TEST_BIT (sup_igraph, s->id * num_webs + t->id)
102 || TEST_BIT (sup_igraph, t->id * num_webs + s->id)
103 || s->pattern || t->pattern)
104 continue;
106 deleted_move_insns++;
107 deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1;
108 PUT_CODE (m->insn, NOTE);
109 NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED;
110 df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn);
112 m->target_web->target_of_spilled_move = 1;
113 if (s == t)
114 /* Maybe already coalesced due to a former move. */
115 continue;
116 /* Merge the nodes S and T in the I-graph. Beware: the merging
117 of conflicts relies on the fact that in the conflict list
118 of T all of its conflicts are noted. This is currently not
119 the case if T were the target of a coalesced web, because
120 then (in combine () above) only those conflicts were noted in
121 T from the web which was coalesced into T, which at the time
122 of combine() were not already on the SELECT stack or were
123 themselves coalesced to something else. */
124 if (t->type != SPILLED || s->type != SPILLED)
125 abort ();
126 remove_list (t->dlink, &WEBS(SPILLED));
127 put_web (t, COALESCED);
128 t->alias = s;
129 s->is_coalesced = 1;
130 t->is_coalesced = 1;
131 merge_moves (s, t);
132 for (wl = t->conflict_list; wl; wl = wl->next)
134 struct web *pweb = wl->t;
135 if (wl->sub == NULL)
136 record_conflict (s, pweb);
137 else
139 struct sub_conflict *sl;
140 for (sl = wl->sub; sl; sl = sl->next)
142 struct web *sweb = NULL;
143 if (SUBWEB_P (sl->s))
144 sweb = find_subweb (s, sl->s->orig_x);
145 if (!sweb)
146 sweb = s;
147 record_conflict (sweb, sl->t);
150 /* No decrement_degree here, because we already have colored
151 the graph, and don't want to insert pweb into any other
152 list. */
153 pweb->num_conflicts -= 1 + t->add_hardregs;
159 /* Returns the probable saving of coalescing WEB with webs from
160 SPILLED, in terms of removed move insn cost. */
162 static unsigned HOST_WIDE_INT
163 spill_prop_savings (web, spilled)
164 struct web *web;
165 sbitmap spilled;
167 unsigned HOST_WIDE_INT savings = 0;
168 struct move_list *ml;
169 struct move *m;
170 unsigned int cost;
171 if (web->pattern)
172 return 0;
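/* COST below is one spill load plus one spill store in this web's mode and
   register class; each move to an already spilled web (counted in the loop
   further down) would save about this much, weighted by block frequency,
   if WEB were also spilled and coalesced into the same stack slot. */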
173 cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1);
174 cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0);
175 for (ml = wl_moves; ml; ml = ml->next)
176 if ((m = ml->move) != NULL)
178 struct web *s = alias (m->source_web);
179 struct web *t = alias (m->target_web);
180 if (s != web)
182 struct web *h = s;
183 s = t;
184 t = h;
186 if (s != web || !TEST_BIT (spilled, t->id) || t->pattern
187 || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
188 || TEST_BIT (sup_igraph, t->id * num_webs + s->id))
189 continue;
190 savings += BLOCK_FOR_INSN (m->insn)->frequency * cost;
192 return savings;
195 /* This adds the IDs of all colored webs which are connected to WEB by a move
196 to LIST and PROCESSED. */
198 static void
199 spill_prop_insert (web, list, processed)
200 struct web *web;
201 sbitmap list, processed;
203 struct move_list *ml;
204 struct move *m;
205 for (ml = wl_moves; ml; ml = ml->next)
206 if ((m = ml->move) != NULL)
208 struct web *s = alias (m->source_web);
209 struct web *t = alias (m->target_web);
210 if (s != web)
212 struct web *h = s;
213 s = t;
214 t = h;
216 if (s != web || t->type != COLORED || TEST_BIT (processed, t->id))
217 continue;
218 SET_BIT (list, t->id);
219 SET_BIT (processed, t->id);
223 /* The spill propagation pass. If we have two spilled webs, the first
224 connected through a move to a colored one, and the second also connected
225 to that colored one, and this colored web is only used to connect both
226 spilled webs, it might be worthwhile to spill that colored one.
227 This is the case if the cost of the removed copy insns (all three webs
228 could be placed into the same stack slot) is higher than the spill cost
229 of the web.
230 TO_PROP are the webs we try to propagate from (i.e. spilled ones),
231 SPILLED the set of all spilled webs so far and PROCESSED the set
232 of all webs processed so far, so we don't do work twice. */
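/* Example: spilled webs A and B are each connected by a move to colored
   web C, and C serves no other purpose than connecting them. If the
   frequency-weighted cost of those moves exceeds C's spill cost, it pays
   to spill C as well, so all three share one stack slot and the moves
   can be deleted. */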
234 static int
235 spill_propagation (to_prop, spilled, processed)
236 sbitmap to_prop, spilled, processed;
238 int id;
239 int again = 0;
240 sbitmap list = sbitmap_alloc (num_webs);
241 sbitmap_zero (list);
243 /* First insert colored move neighbors into the candidate list. */
244 EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id,
246 spill_prop_insert (ID2WEB (id), list, processed);
248 sbitmap_zero (to_prop);
250 /* For all candidates, see if the savings are higher than its
251 spill cost. */
252 while ((id = sbitmap_first_set_bit (list)) >= 0)
254 struct web *web = ID2WEB (id);
255 RESET_BIT (list, id);
256 if (spill_prop_savings (web, spilled) >= web->spill_cost)
258 /* If so, we found a new spilled web. Insert its colored
259 move neighbors again, and mark that we need to repeat the
260 whole main loop of spill propagation/coalescing again. */
261 remove_web_from_list (web);
262 web->color = -1;
263 put_web (web, SPILLED);
264 SET_BIT (spilled, id);
265 SET_BIT (to_prop, id);
266 spill_prop_insert (web, list, processed);
267 again = 1;
270 sbitmap_free (list);
271 return again;
274 /* The main phase to improve spill costs. This repeatedly runs
275 spill coalescing and spill propagation, until nothing changes. */
277 static void
278 spill_coalprop ()
280 sbitmap spilled, processed, to_prop;
281 struct dlist *d;
282 int again;
283 spilled = sbitmap_alloc (num_webs);
284 processed = sbitmap_alloc (num_webs);
285 to_prop = sbitmap_alloc (num_webs);
286 sbitmap_zero (spilled);
287 for (d = WEBS(SPILLED); d; d = d->next)
288 SET_BIT (spilled, DLIST_WEB (d)->id);
289 sbitmap_copy (to_prop, spilled);
290 sbitmap_zero (processed);
293 spill_coalescing (to_prop, spilled);
294 /* XXX Currently (with optimistic coalescing) spill_propagation()
295 doesn't give better code, sometimes it gives worse (but not by much)
296 code. I believe this is because of slightly wrong cost
297 measurements. Anyway right now it isn't worth the time it takes,
298 so deactivate it for now. */
299 again = 0 && spill_propagation (to_prop, spilled, processed);
301 while (again);
302 sbitmap_free (to_prop);
303 sbitmap_free (processed);
304 sbitmap_free (spilled);
307 /* Allocate a spill slot for a WEB. Currently we spill to pseudo
308 registers, to be able to also track webs for "stack slots", and also
309 to possibly colorize them. These pseudos are sometimes handled
310 in a special way, where we know that they can also represent
311 MEM references. */
313 static void
314 allocate_spill_web (web)
315 struct web *web;
317 int regno = web->regno;
318 rtx slot;
319 if (web->stack_slot)
320 return;
321 slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno));
322 web->stack_slot = slot;
325 /* This chooses a color for all SPILLED webs for interference region
326 spilling. The heuristic isn't good in any way. */
328 static void
329 choose_spill_colors ()
331 struct dlist *d;
332 unsigned HOST_WIDE_INT *costs = (unsigned HOST_WIDE_INT *)
333 xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
334 for (d = WEBS(SPILLED); d; d = d->next)
336 struct web *web = DLIST_WEB (d);
337 struct conflict_link *wl;
338 int bestc, c;
339 HARD_REG_SET avail;
340 memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
341 for (wl = web->conflict_list; wl; wl = wl->next)
343 struct web *pweb = wl->t;
344 if (pweb->type == COLORED || pweb->type == PRECOLORED)
345 costs[pweb->color] += pweb->spill_cost;
348 COPY_HARD_REG_SET (avail, web->usable_regs);
349 if (web->crosses_call)
351 /* Add an arbitrary constant cost to colors not usable by
352 call-crossing webs without saves/loads. */
353 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
354 if (TEST_HARD_REG_BIT (call_used_reg_set, c))
355 costs[c] += 1000;
357 bestc = -1;
358 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
359 if ((bestc < 0 || costs[bestc] > costs[c])
360 && TEST_HARD_REG_BIT (avail, c)
361 && HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
363 int i, size;
364 size = HARD_REGNO_NREGS (c, PSEUDO_REGNO_MODE (web->regno));
365 for (i = 1; i < size
366 && TEST_HARD_REG_BIT (avail, c + i); i++);
367 if (i == size)
368 bestc = c;
370 web->color = bestc;
371 ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n",
372 bestc, web->id);
375 free (costs);
378 /* For statistics' sake we count the number and cost of all new loads,
379 stores and emitted rematerializations. */
380 static unsigned int emitted_spill_loads;
381 static unsigned int emitted_spill_stores;
382 static unsigned int emitted_remat;
383 static unsigned HOST_WIDE_INT spill_load_cost;
384 static unsigned HOST_WIDE_INT spill_store_cost;
385 static unsigned HOST_WIDE_INT spill_remat_cost;
387 /* In rewrite_program2() we detect if some def is useless, in the sense
388 that the pseudo set is not live anymore at that point. The REF_IDs
389 of such defs are noted here. */
390 static bitmap useless_defs;
392 /* This is the simple and fast version of rewriting the program to
393 include spill code. It spills at every insn containing spilled
394 defs or uses. Loads are added only if flag_ra_spill_every_use is
395 nonzero, otherwise only stores will be added. This doesn't
396 support rematerialization.
397 NEW_DEATHS is filled with the uids of insns which probably contain
398 deaths. */
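/* Concretely: for each use of a spilled web a load from its stack slot is
   emitted directly before the using insn (when flag_ra_spill_every_use),
   and for each def a store to that slot is emitted directly after the
   defining insn. */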
400 static void
401 rewrite_program (new_deaths)
402 bitmap new_deaths;
404 unsigned int i;
405 struct dlist *d;
406 bitmap b = BITMAP_XMALLOC ();
408 /* We walk over all webs, over all uses/defs. For all webs, we need
409 to look at spilled webs, and webs coalesced to spilled ones, in case
410 their alias isn't broken up, or they got spill coalesced. */
411 for (i = 0; i < 2; i++)
412 for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
414 struct web *web = DLIST_WEB (d);
415 struct web *aweb = alias (web);
416 unsigned int j;
417 rtx slot;
419 /* Is trivially true for spilled webs, but not for coalesced ones. */
420 if (aweb->type != SPILLED)
421 continue;
423 /* First add loads before every use, if we have to. */
424 if (flag_ra_spill_every_use)
426 bitmap_clear (b);
427 allocate_spill_web (aweb);
428 slot = aweb->stack_slot;
429 for (j = 0; j < web->num_uses; j++)
431 rtx insns, target, source;
432 rtx insn = DF_REF_INSN (web->uses[j]);
433 rtx prev = PREV_INSN (insn);
434 basic_block bb = BLOCK_FOR_INSN (insn);
435 /* Happens when spill_coalescing() deletes move insns. */
436 if (!INSN_P (insn))
437 continue;
439 /* Check that we didn't already add a load for this web
440 and insn. Happens when an insn uses the same web
441 multiple times. */
442 if (bitmap_bit_p (b, INSN_UID (insn)))
443 continue;
444 bitmap_set_bit (b, INSN_UID (insn));
445 target = DF_REF_REG (web->uses[j]);
446 source = slot;
447 start_sequence ();
448 if (GET_CODE (target) == SUBREG)
449 source = simplify_gen_subreg (GET_MODE (target), source,
450 GET_MODE (source),
451 SUBREG_BYTE (target));
452 ra_emit_move_insn (target, source);
453 insns = get_insns ();
454 end_sequence ();
455 emit_insn_before (insns, insn);
457 if (bb->head == insn)
458 bb->head = NEXT_INSN (prev);
459 for (insn = PREV_INSN (insn); insn != prev;
460 insn = PREV_INSN (insn))
462 set_block_for_insn (insn, bb);
463 df_insn_modify (df, bb, insn);
466 emitted_spill_loads++;
467 spill_load_cost += bb->frequency + 1;
471 /* Now emit the stores after each def.
472 If any uses were loaded from stack slots (as opposed to being
473 rematerialized or not reloaded due to IR spilling),
474 aweb->stack_slot will be set. If not, we don't need to emit
475 any stack stores. */
476 slot = aweb->stack_slot;
477 bitmap_clear (b);
478 if (slot)
479 for (j = 0; j < web->num_defs; j++)
481 rtx insns, source, dest;
482 rtx insn = DF_REF_INSN (web->defs[j]);
483 rtx following = NEXT_INSN (insn);
484 basic_block bb = BLOCK_FOR_INSN (insn);
485 /* Happens when spill_coalescing() deletes move insns. */
486 if (!INSN_P (insn))
487 continue;
488 if (bitmap_bit_p (b, INSN_UID (insn)))
489 continue;
490 bitmap_set_bit (b, INSN_UID (insn));
491 start_sequence ();
492 source = DF_REF_REG (web->defs[j]);
493 dest = slot;
494 if (GET_CODE (source) == SUBREG)
495 dest = simplify_gen_subreg (GET_MODE (source), dest,
496 GET_MODE (dest),
497 SUBREG_BYTE (source));
498 ra_emit_move_insn (dest, source);
500 insns = get_insns ();
501 end_sequence ();
502 if (insns)
504 emit_insn_after (insns, insn);
505 if (bb->end == insn)
506 bb->end = PREV_INSN (following);
507 for (insn = insns; insn != following; insn = NEXT_INSN (insn))
509 set_block_for_insn (insn, bb);
510 df_insn_modify (df, bb, insn);
513 else
514 df_insn_modify (df, bb, insn);
515 emitted_spill_stores++;
516 spill_store_cost += bb->frequency + 1;
517 /* XXX we should set new_deaths for all inserted stores
518 whose pseudo dies here.
519 Note that this isn't the case for _all_ stores. */
520 /* I.e. the next statement is wrong, and might cause some spilltemps
521 to be categorized as spilltemp2's (i.e. live over a death),
522 although they aren't. This might make them spill again,
523 which causes an endless loop in case this insn is in fact
524 _no_ death. */
525 bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
529 BITMAP_XFREE (b);
532 /* A simple list of rtx's. */
533 struct rtx_list
535 struct rtx_list *next;
536 rtx x;
539 /* Adds X to *LIST. */
541 static void
542 remember_slot (list, x)
543 struct rtx_list **list;
544 rtx x;
546 struct rtx_list *l;
547 /* PRE: X is not already in LIST. */
548 l = (struct rtx_list *) ra_alloc (sizeof (*l));
549 l->next = *list;
550 l->x = x;
551 *list = l;
554 /* Given two rtx's S1 and S2, either being REGs or MEMs (or SUBREGs
555 thereof), return nonzero if they overlap. A REG never overlaps a MEM,
556 and if they are MEMs they must have an easy address of the form
557 (plus (basereg) (const_int x)), otherwise they are assumed to overlap. */
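/* Example: S1 = (mem:DI (plus (reg sp) (const_int 4))) and
   S2 = (mem:SI (plus (reg sp) (const_int 8))) cover the byte ranges
   [4,12) and [8,12), so they overlap and 1 is returned; with offsets
   4 and 16 the ranges would be disjoint and 0 is returned. */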
559 static int
560 slots_overlap_p (s1, s2)
561 rtx s1, s2;
563 rtx base1, base2;
564 HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
565 int size1 = GET_MODE_SIZE (GET_MODE (s1));
566 int size2 = GET_MODE_SIZE (GET_MODE (s2));
567 if (GET_CODE (s1) == SUBREG)
568 ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1);
569 if (GET_CODE (s2) == SUBREG)
570 ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2);
572 if (s1 == s2)
573 return 1;
575 if (GET_CODE (s1) != GET_CODE (s2))
576 return 0;
578 if (GET_CODE (s1) == REG && GET_CODE (s2) == REG)
580 if (REGNO (s1) != REGNO (s2))
581 return 0;
582 if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
583 return 0;
584 return 1;
586 if (GET_CODE (s1) != MEM || GET_CODE (s2) != MEM)
587 abort ();
588 s1 = XEXP (s1, 0);
589 s2 = XEXP (s2, 0);
590 if (GET_CODE (s1) != PLUS || GET_CODE (XEXP (s1, 0)) != REG
591 || GET_CODE (XEXP (s1, 1)) != CONST_INT)
592 return 1;
593 if (GET_CODE (s2) != PLUS || GET_CODE (XEXP (s2, 0)) != REG
594 || GET_CODE (XEXP (s2, 1)) != CONST_INT)
595 return 1;
596 base1 = XEXP (s1, 0);
597 base2 = XEXP (s2, 0);
598 if (!rtx_equal_p (base1, base2))
599 return 1;
600 ofs1 += INTVAL (XEXP (s1, 1));
601 ofs2 += INTVAL (XEXP (s2, 1));
602 if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
603 return 0;
604 return 1;
607 /* This deletes from *LIST all rtx's which overlap with X in the sense
608 of slots_overlap_p(). */
610 static void
611 delete_overlapping_slots (list, x)
612 struct rtx_list **list;
613 rtx x;
615 while (*list)
617 if (slots_overlap_p ((*list)->x, x))
618 *list = (*list)->next;
619 else
620 list = &((*list)->next);
624 /* Returns nonzero if X is a member of LIST. */
626 static int
627 slot_member_p (list, x)
628 struct rtx_list *list;
629 rtx x;
631 for (;list; list = list->next)
632 if (rtx_equal_p (list->x, x))
633 return 1;
634 return 0;
637 /* A more sophisticated (and slower) method of adding the stores than
638 rewrite_program(). This goes backward over the insn stream, adding
639 stores as it goes, but only if it hasn't just added a store to the
640 same location. NEW_DEATHS is a bitmap filled with uids of insns
641 containing deaths. */
643 static void
644 insert_stores (new_deaths)
645 bitmap new_deaths;
647 rtx insn;
648 rtx last_slot = NULL_RTX;
649 struct rtx_list *slots = NULL;
651 /* We simply go backwards, also across basic block borders. */
652 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
654 int uid = INSN_UID (insn);
656 /* If we reach a basic block border which has more than one
657 outgoing edge, we simply forget all already emitted stores. */
658 if (GET_CODE (insn) == BARRIER
659 || JUMP_P (insn) || can_throw_internal (insn))
661 last_slot = NULL_RTX;
662 slots = NULL;
664 if (!INSN_P (insn))
665 continue;
667 /* If this insn was not just added in this pass. */
668 if (uid < insn_df_max_uid)
670 unsigned int n;
671 rtx following = NEXT_INSN (insn);
672 basic_block bb = BLOCK_FOR_INSN (insn);
673 struct ra_insn_info info;
675 info = insn_df[uid];
676 for (n = 0; n < info.num_defs; n++)
678 struct web *web = def2web[DF_REF_ID (info.defs[n])];
679 struct web *aweb = alias (find_web_for_subweb (web));
680 rtx slot, source;
681 if (aweb->type != SPILLED || !aweb->stack_slot)
682 continue;
683 slot = aweb->stack_slot;
684 source = DF_REF_REG (info.defs[n]);
685 /* adjust_address() might generate code. */
686 start_sequence ();
687 if (GET_CODE (source) == SUBREG)
688 slot = simplify_gen_subreg (GET_MODE (source), slot,
689 GET_MODE (slot),
690 SUBREG_BYTE (source));
691 /* If we have no info about emitted stores, or it didn't
692 contain the location we intend to use soon, then
693 add the store. */
694 if ((!last_slot || !rtx_equal_p (slot, last_slot))
695 && ! slot_member_p (slots, slot))
697 rtx insns, ni;
698 last_slot = slot;
699 remember_slot (&slots, slot);
700 ra_emit_move_insn (slot, source);
701 insns = get_insns ();
702 end_sequence ();
703 if (insns)
705 emit_insn_after (insns, insn);
706 if (bb->end == insn)
707 bb->end = PREV_INSN (following);
708 for (ni = insns; ni != following; ni = NEXT_INSN (ni))
710 set_block_for_insn (ni, bb);
711 df_insn_modify (df, bb, ni);
714 else
715 df_insn_modify (df, bb, insn);
716 emitted_spill_stores++;
717 spill_store_cost += bb->frequency + 1;
718 bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
720 else
722 /* Otherwise ignore insns from adjust_address() above. */
723 end_sequence ();
727 /* If we look at a load generated by the allocator, forget
728 the last emitted slot, and additionally clear all slots
729 overlapping its source (after all, we need it again). */
730 /* XXX If we emit the stack-ref directly into the using insn the
731 following needs a change, because that is not a new insn. Preferably
732 we would add some notes to the insn, saying which stack slots are needed
733 for it. */
734 if (uid >= last_max_uid)
736 rtx set = single_set (insn);
737 last_slot = NULL_RTX;
738 /* If this was not a simple set, give up and forget everything. */
739 if (!set)
740 slots = NULL;
741 else
743 if (1 || GET_CODE (SET_SRC (set)) == MEM)
744 delete_overlapping_slots (&slots, SET_SRC (set));
750 /* Returns 1 if both colored webs have some hardregs in common, even if
751 they are not the same width. */
753 static int
754 spill_same_color_p (web1, web2)
755 struct web *web1, *web2;
757 int c1, size1, c2, size2;
758 if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
759 return 0;
760 if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color)
761 return 0;
763 size1 = web1->type == PRECOLORED
764 ? 1 : HARD_REGNO_NREGS (c1, PSEUDO_REGNO_MODE (web1->regno));
765 size2 = web2->type == PRECOLORED
766 ? 1 : HARD_REGNO_NREGS (c2, PSEUDO_REGNO_MODE (web2->regno));
767 if (c1 >= c2 + size2 || c2 >= c1 + size1)
768 return 0;
769 return 1;
772 /* Given the set of live web IDs LIVE, returns nonzero if any of WEB's
773 subwebs (or WEB itself) is live. */
775 static bool
776 is_partly_live_1 (live, web)
777 sbitmap live;
778 struct web *web;
780 do
781 if (TEST_BIT (live, web->id))
782 return 1;
783 while ((web = web->subreg_next));
784 return 0;
787 /* Fast version in case WEB has no subwebs. */
788 #define is_partly_live(live, web) ((!web->subreg_next) \
789 ? TEST_BIT (live, web->id) \
790 : is_partly_live_1 (live, web))
792 /* Change the set of currently IN_USE colors according to
793 WEB's color. Either add those colors to the hardreg set (if ADD
794 is nonzero), or remove them. */
796 static void
797 update_spill_colors (in_use, web, add)
798 HARD_REG_SET *in_use;
799 struct web *web;
800 int add;
802 int c, size;
803 if ((c = alias (find_web_for_subweb (web))->color) < 0
804 || c == an_unusable_color)
805 return;
806 size = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
807 if (SUBWEB_P (web))
809 c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
810 SUBREG_BYTE (web->orig_x),
811 GET_MODE (web->orig_x));
813 else if (web->type == PRECOLORED)
814 size = 1;
815 if (add)
816 for (; size--;)
817 SET_HARD_REG_BIT (*in_use, c + size);
818 else
819 for (; size--;)
820 CLEAR_HARD_REG_BIT (*in_use, c + size);
823 /* Given a set of hardregs currently IN_USE and the color C of WEB,
824 return -1 if WEB has no color, 1 if it has the unusable color,
825 0 if one of its used hardregs is in use, and 1 otherwise.
826 Generally, if WEB can't be left colorized, return 1. */
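/* E.g. if C is 3 and WEB's mode spans two hard registers, both regs 3
   and 4 must be clear in IN_USE for the result to be 1. */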
828 static int
829 spill_is_free (in_use, web)
830 HARD_REG_SET *in_use;
831 struct web *web;
833 int c, size;
834 if ((c = alias (web)->color) < 0)
835 return -1;
836 if (c == an_unusable_color)
837 return 1;
838 size = web->type == PRECOLORED
839 ? 1 : HARD_REGNO_NREGS (c, PSEUDO_REGNO_MODE (web->regno));
840 for (; size--;)
841 if (TEST_HARD_REG_BIT (*in_use, c + size))
842 return 0;
843 return 1;
847 /* Structure for passing between rewrite_program2() and emit_loads(). */
848 struct rewrite_info
850 /* The web IDs which currently would need a reload. These are
851 currently live spilled webs, whose color was still free. */
852 bitmap need_reload;
853 /* We need a scratch bitmap, but don't want to allocate one a zillion
854 times. */
855 bitmap scratch;
856 /* Web IDs of currently live webs. These are the precise IDs,
857 not just those of the superwebs. If only one part is live, only
858 that ID is placed here. */
859 sbitmap live;
860 /* An array of webs, which currently need a load added.
861 They will be emitted when seeing the first death. */
862 struct web **needed_loads;
863 /* The current number of entries in needed_loads. */
864 int nl_size;
865 /* The number of bits set in need_reload. */
866 int num_reloads;
867 /* The current set of hardregs not available. */
868 HARD_REG_SET colors_in_use;
869 /* Nonzero, if we just added some spill temps to need_reload or
870 needed_loads. In this case we don't wait for the next death
871 to emit their loads. */
872 int any_spilltemps_spilled;
873 /* Nonzero, if we currently need to emit the loads. E.g. when we
874 saw an insn containing deaths. */
875 int need_load;
878 /* The needed_loads list of RI contains some webs for which
879 we add the actual load insns here. They are added just before
880 their last seen use. NL_FIRST_RELOAD is the index of the first
881 load which is a converted reload; all other entries are normal
882 loads. LAST_BLOCK_INSN is the last insn of the current basic block. */
884 static void
885 emit_loads (ri, nl_first_reload, last_block_insn)
886 struct rewrite_info *ri;
887 int nl_first_reload;
888 rtx last_block_insn;
890 int j;
891 for (j = ri->nl_size; j;)
893 struct web *web = ri->needed_loads[--j];
894 struct web *supweb;
895 struct web *aweb;
896 rtx ni, slot, reg;
897 rtx before = NULL_RTX, after = NULL_RTX;
898 basic_block bb;
899 /* When spilltemps were spilled for the last insns, their
900 loads are already emitted, which is noted by setting
901 needed_loads[] for them to 0. */
902 if (!web)
903 continue;
904 supweb = find_web_for_subweb (web);
905 if (supweb->regno >= max_normal_pseudo)
906 abort ();
907 /* Check for web being a spilltemp, if we only want to
908 load spilltemps. Also remember that we emitted that
909 load, which we don't need to do when we have a death,
910 because then all of needed_loads[] is emptied. */
911 if (!ri->need_load)
913 if (!supweb->spill_temp)
914 continue;
915 else
916 ri->needed_loads[j] = 0;
918 web->in_load = 0;
919 /* The adding of reloads doesn't depend on liveness. */
920 if (j < nl_first_reload && !TEST_BIT (ri->live, web->id))
921 continue;
922 aweb = alias (supweb);
923 aweb->changed = 1;
924 start_sequence ();
925 if (supweb->pattern)
927 /* XXX If we later allow non-constant sources for rematerialization
928 we must also disallow coalescing _to_ rematerialized webs
929 (at least then disallow spilling them, which we already ensure
930 when flag_ra_break_aliases), or not take the pattern but a
931 stackslot. */
932 if (aweb != supweb)
933 abort ();
934 slot = copy_rtx (supweb->pattern);
935 reg = copy_rtx (supweb->orig_x);
936 /* Sanity check. orig_x should be a REG rtx, which should be
937 shared over all RTL, so copy_rtx should have no effect. */
938 if (reg != supweb->orig_x)
939 abort ();
941 else
943 allocate_spill_web (aweb);
944 slot = aweb->stack_slot;
946 /* If we don't copy the RTL there might be some SUBREG
947 rtx shared in the next iteration although being in
948 different webs, which leads to wrong code. */
949 reg = copy_rtx (web->orig_x);
950 if (GET_CODE (reg) == SUBREG)
951 /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
952 (reg));*/
953 slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot),
954 SUBREG_BYTE (reg));
956 ra_emit_move_insn (reg, slot);
957 ni = get_insns ();
958 end_sequence ();
959 before = web->last_use_insn;
960 web->last_use_insn = NULL_RTX;
961 if (!before)
963 if (JUMP_P (last_block_insn))
964 before = last_block_insn;
965 else
966 after = last_block_insn;
968 if (after)
970 rtx foll = NEXT_INSN (after);
971 bb = BLOCK_FOR_INSN (after);
972 emit_insn_after (ni, after);
973 if (bb->end == after)
974 bb->end = PREV_INSN (foll);
975 for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
977 set_block_for_insn (ni, bb);
978 df_insn_modify (df, bb, ni);
981 else
983 rtx prev = PREV_INSN (before);
984 bb = BLOCK_FOR_INSN (before);
985 emit_insn_before (ni, before);
986 if (bb->head == before)
987 bb->head = NEXT_INSN (prev);
988 for (; ni != before; ni = NEXT_INSN (ni))
990 set_block_for_insn (ni, bb);
991 df_insn_modify (df, bb, ni);
994 if (supweb->pattern)
996 emitted_remat++;
997 spill_remat_cost += bb->frequency + 1;
999 else
1001 emitted_spill_loads++;
1002 spill_load_cost += bb->frequency + 1;
1004 RESET_BIT (ri->live, web->id);
1005 /* In the special case documented above only emit the reloads and
1006 one load. */
1007 if (ri->need_load == 2 && j < nl_first_reload)
1008 break;
1010 if (ri->need_load)
1011 ri->nl_size = j;
1014 /* Given a set of reloads in RI, an array of NUM_REFS references (either
1015 uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
1016 (either use2web or def2web), convert some reloads to loads.
1017 This looks at the webs referenced, and how they change the set of
1018 available colors. Now put all still live webs which needed reloads,
1019 and whose colors aren't free anymore, on the needed_loads list. */
1021 static void
1022 reloads_to_loads (ri, refs, num_refs, ref2web)
1023 struct rewrite_info *ri;
1024 struct ref **refs;
1025 unsigned int num_refs;
1026 struct web **ref2web;
1028 unsigned int n;
1029 int num_reloads = ri->num_reloads;
1030 for (n = 0; n < num_refs && num_reloads; n++)
1032 struct web *web = ref2web[DF_REF_ID (refs[n])];
1033 struct web *supweb = find_web_for_subweb (web);
1034 int is_death;
1035 int j;
1036 /* Only emit reloads when entering their interference
1037 region. A use of a spilled web never opens an
1038 interference region, independent of its color. */
1039 if (alias (supweb)->type == SPILLED)
1040 continue;
1041 if (supweb->type == PRECOLORED
1042 && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
1043 continue;
1044 /* Note that if web (and supweb) are DEFs, we already cleared
1045 the corresponding bits in live. I.e. is_death becomes true, which
1046 is what we want. */
1047 is_death = !TEST_BIT (ri->live, supweb->id);
1048 is_death &= !TEST_BIT (ri->live, web->id);
1049 if (is_death)
1051 int old_num_r = num_reloads;
1052 bitmap_clear (ri->scratch);
1053 EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j,
1055 struct web *web2 = ID2WEB (j);
1056 struct web *aweb2 = alias (find_web_for_subweb (web2));
1057 if (spill_is_free (&(ri->colors_in_use), aweb2) == 0)
1058 abort ();
1059 if (spill_same_color_p (supweb, aweb2)
1060 /* && interfere (web, web2) */)
1062 if (!web2->in_load)
1064 ri->needed_loads[ri->nl_size++] = web2;
1065 web2->in_load = 1;
1067 bitmap_set_bit (ri->scratch, j);
1068 num_reloads--;
1071 if (num_reloads != old_num_r)
1072 bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch,
1073 BITMAP_AND_COMPL);
1076 ri->num_reloads = num_reloads;
1079 /* This adds loads for spilled webs to the program. It uses a kind of
1080 interference region spilling. If flag_ra_ir_spilling is zero it
1081 only uses improved Chaitin spilling (adding loads only at insns
1082 containing deaths). */
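/* Roughly: a live spilled web whose color is still unoccupied is first
   remembered only as a pending reload (ri.need_reload); an actual load is
   emitted once its color becomes occupied or a death is seen, see
   reloads_to_loads() and emit_loads() below. */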
1084 static void
1085 rewrite_program2 (new_deaths)
1086 bitmap new_deaths;
1088 basic_block bb = NULL;
1089 int nl_first_reload;
1090 struct rewrite_info ri;
1091 rtx insn;
1092 ri.needed_loads = (struct web **) xmalloc (num_webs * sizeof (struct web *));
1093 ri.need_reload = BITMAP_XMALLOC ();
1094 ri.scratch = BITMAP_XMALLOC ();
1095 ri.live = sbitmap_alloc (num_webs);
1096 ri.nl_size = 0;
1097 ri.num_reloads = 0;
1098 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1100 basic_block last_bb = NULL;
1101 rtx last_block_insn;
1102 int i, j;
1103 if (!INSN_P (insn))
1104 insn = prev_real_insn (insn);
1105 while (insn && !(bb = BLOCK_FOR_INSN (insn)))
1106 insn = prev_real_insn (insn);
1107 if (!insn)
1108 break;
1109 i = bb->index + 2;
1110 last_block_insn = insn;
1112 sbitmap_zero (ri.live);
1113 CLEAR_HARD_REG_SET (ri.colors_in_use);
1114 EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j,
1116 struct web *web = use2web[j];
1117 struct web *aweb = alias (find_web_for_subweb (web));
1118 /* A web is only live at end if it isn't spilled. If we didn't
1119 check this, the last uses of spilled webs per basic block
1120 wouldn't be detected as deaths, although they are in the final
1121 code. This would lead to accumulating many loads without need,
1122 only increasing register pressure. */
1123 /* XXX also add spilled webs which got a color for IR spilling.
1124 Remember to not add to colors_in_use in that case. */
1125 if (aweb->type != SPILLED /*|| aweb->color >= 0*/)
1127 SET_BIT (ri.live, web->id);
1128 if (aweb->type != SPILLED)
1129 update_spill_colors (&(ri.colors_in_use), web, 1);
1133 bitmap_clear (ri.need_reload);
1134 ri.num_reloads = 0;
1135 ri.any_spilltemps_spilled = 0;
1136 if (flag_ra_ir_spilling)
1138 struct dlist *d;
1139 int pass;
1140 /* XXX If we don't add spilled nodes into live above, the following
1141 becomes an empty loop. */
1142 for (pass = 0; pass < 2; pass++)
1143 for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
1145 struct web *web = DLIST_WEB (d);
1146 struct web *aweb = alias (web);
1147 if (aweb->type != SPILLED)
1148 continue;
1149 if (is_partly_live (ri.live, web)
1150 && spill_is_free (&(ri.colors_in_use), web) > 0)
1152 ri.num_reloads++;
1153 bitmap_set_bit (ri.need_reload, web->id);
1154 /* Last using insn is somewhere in another block. */
1155 web->last_use_insn = NULL_RTX;
1160 last_bb = bb;
1161 for (; insn; insn = PREV_INSN (insn))
1163 struct ra_insn_info info;
1164 unsigned int n;
1166 if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
1168 int index = BLOCK_FOR_INSN (insn)->index + 2;
1169 EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j,
1171 struct web *web = use2web[j];
1172 struct web *aweb = alias (find_web_for_subweb (web));
1173 if (aweb->type != SPILLED)
1175 SET_BIT (ri.live, web->id);
1176 update_spill_colors (&(ri.colors_in_use), web, 1);
1179 bitmap_clear (ri.scratch);
1180 EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
1182 struct web *web2 = ID2WEB (j);
1183 struct web *supweb2 = find_web_for_subweb (web2);
1184 struct web *aweb2 = alias (supweb2);
1185 if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0)
1187 if (!web2->in_load)
1189 ri.needed_loads[ri.nl_size++] = web2;
1190 web2->in_load = 1;
1192 bitmap_set_bit (ri.scratch, j);
1193 ri.num_reloads--;
1196 bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
1197 BITMAP_AND_COMPL);
1198 last_bb = BLOCK_FOR_INSN (insn);
1199 last_block_insn = insn;
1200 if (!INSN_P (last_block_insn))
1201 last_block_insn = prev_real_insn (last_block_insn);
1204 ri.need_load = 0;
1205 if (INSN_P (insn))
1206 info = insn_df[INSN_UID (insn)];
1208 if (INSN_P (insn))
1209 for (n = 0; n < info.num_defs; n++)
1211 struct ref *ref = info.defs[n];
1212 struct web *web = def2web[DF_REF_ID (ref)];
1213 struct web *supweb = find_web_for_subweb (web);
1214 int is_non_def = 0;
1215 unsigned int n2;
1217 supweb = find_web_for_subweb (web);
1218 /* Webs which are defined here, but also used in the same insn
1219 are rmw webs, or this use isn't a death because of looping
1220 constructs. In neither case does this def make its resources
1221 available. Reloads for it are still needed, it's still
1222 live and its colors don't become free. */
1223 for (n2 = 0; n2 < info.num_uses; n2++)
1225 struct web *web2 = use2web[DF_REF_ID (info.uses[n2])];
1226 if (supweb == find_web_for_subweb (web2))
1228 is_non_def = 1;
1229 break;
1232 if (is_non_def)
1233 continue;
1235 if (!is_partly_live (ri.live, supweb))
1236 bitmap_set_bit (useless_defs, DF_REF_ID (ref));
1238 RESET_BIT (ri.live, web->id);
1239 if (bitmap_bit_p (ri.need_reload, web->id))
1241 ri.num_reloads--;
1242 bitmap_clear_bit (ri.need_reload, web->id);
1244 if (web != supweb)
1246 /* XXX subwebs aren't precisely tracked here. We have
1247 everything we need (inverse webs), but the code isn't
1248 yet written. We need to make all completely
1249 overlapping web parts non-live here. */
1250 /* If by luck now the whole web isn't live anymore, no
1251 reloads for it are needed. */
1252 if (!is_partly_live (ri.live, supweb)
1253 && bitmap_bit_p (ri.need_reload, supweb->id))
1255 ri.num_reloads--;
1256 bitmap_clear_bit (ri.need_reload, supweb->id);
1259 else
1261 struct web *sweb;
1262 /* If the whole web is defined here, no parts of it are
1263 live anymore and no reloads are needed for them. */
1264 for (sweb = supweb->subreg_next; sweb;
1265 sweb = sweb->subreg_next)
1267 RESET_BIT (ri.live, sweb->id);
1268 if (bitmap_bit_p (ri.need_reload, sweb->id))
1270 ri.num_reloads--;
1271 bitmap_clear_bit (ri.need_reload, sweb->id);
1275 if (alias (supweb)->type != SPILLED)
1276 update_spill_colors (&(ri.colors_in_use), web, 0);
1279 nl_first_reload = ri.nl_size;
1281 /* CALL_INSNs are not really deaths, but still more registers
1282 are free after a call than before.
1283 XXX Note that sometimes reload barfs when we emit insns between
1284 a call and the insn which copies the return register into a
1285 pseudo. */
1286 if (GET_CODE (insn) == CALL_INSN)
1287 ri.need_load = 1;
1288 else if (INSN_P (insn))
1289 for (n = 0; n < info.num_uses; n++)
1291 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1292 struct web *supweb = find_web_for_subweb (web);
1293 int is_death;
1294 if (supweb->type == PRECOLORED
1295 && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
1296 continue;
1297 is_death = !TEST_BIT (ri.live, supweb->id);
1298 is_death &= !TEST_BIT (ri.live, web->id);
1299 if (is_death)
1301 ri.need_load = 1;
1302 bitmap_set_bit (new_deaths, INSN_UID (insn));
1303 break;
1307 if (INSN_P (insn) && ri.num_reloads)
1309 int old_num_reloads = ri.num_reloads;
1310 reloads_to_loads (&ri, info.uses, info.num_uses, use2web);
1312 /* If this insn sets a pseudo which isn't used later
1313 (i.e. wasn't live before), it is a dead store. We need
1314 to emit all reloads which have the same color as this def.
1315 We don't need to check for non-liveness here to detect
1316 the deadness (it is too late anyway, as we already cleared
1317 the liveness in the first loop over the defs), because if it
1318 _were_ live here, no reload could have that color, as
1319 they would already have been converted to a load. */
1320 if (ri.num_reloads)
1321 reloads_to_loads (&ri, info.defs, info.num_defs, def2web);
1322 if (ri.num_reloads != old_num_reloads && !ri.need_load)
1323 ri.need_load = 1;
1326 if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled))
1327 emit_loads (&ri, nl_first_reload, last_block_insn);
1329 if (INSN_P (insn) && flag_ra_ir_spilling)
1330 for (n = 0; n < info.num_uses; n++)
1332 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1333 struct web *aweb = alias (find_web_for_subweb (web));
1334 if (aweb->type != SPILLED)
1335 update_spill_colors (&(ri.colors_in_use), web, 1);
1338 ri.any_spilltemps_spilled = 0;
1339 if (INSN_P (insn))
1340 for (n = 0; n < info.num_uses; n++)
1342 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1343 struct web *supweb = find_web_for_subweb (web);
1344 struct web *aweb = alias (supweb);
1345 SET_BIT (ri.live, web->id);
1346 if (aweb->type != SPILLED)
1347 continue;
1348 if (supweb->spill_temp)
1349 ri.any_spilltemps_spilled = 1;
1350 web->last_use_insn = insn;
1351 if (!web->in_load)
1353 if (spill_is_free (&(ri.colors_in_use), aweb) <= 0
1354 || !flag_ra_ir_spilling)
1356 ri.needed_loads[ri.nl_size++] = web;
1357 web->in_load = 1;
1358 web->one_load = 1;
1360 else if (!bitmap_bit_p (ri.need_reload, web->id))
1362 bitmap_set_bit (ri.need_reload, web->id);
1363 ri.num_reloads++;
1364 web->one_load = 1;
1366 else
1367 web->one_load = 0;
1369 else
1370 web->one_load = 0;
1373 if (GET_CODE (insn) == CODE_LABEL)
1374 break;
1377 nl_first_reload = ri.nl_size;
1378 if (ri.num_reloads)
1380 int in_ir = 0;
1381 edge e;
1382 int num = 0;
1383 HARD_REG_SET cum_colors, colors;
1384 CLEAR_HARD_REG_SET (cum_colors);
1385 for (e = bb->pred; e && num < 5; e = e->pred_next, num++)
1387 int j;
1388 CLEAR_HARD_REG_SET (colors);
1389 EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j,
1391 struct web *web = use2web[j];
1392 struct web *aweb = alias (find_web_for_subweb (web));
1393 if (aweb->type != SPILLED)
1394 update_spill_colors (&colors, web, 1);
1396 IOR_HARD_REG_SET (cum_colors, colors);
1398 if (num == 5)
1399 in_ir = 1;
1401 bitmap_clear (ri.scratch);
1402 EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
1404 struct web *web2 = ID2WEB (j);
1405 struct web *supweb2 = find_web_for_subweb (web2);
1406 struct web *aweb2 = alias (supweb2);
1407 /* Is block entry an IR boundary for aweb2?
1408 Currently these are just some attempts at good conditions. */
1409 if (((ra_pass > 0 || supweb2->target_of_spilled_move)
1410 && (1 || in_ir || spill_is_free (&cum_colors, aweb2) <= 0))
1411 || (ra_pass == 1
1412 && (in_ir
1413 || spill_is_free (&cum_colors, aweb2) <= 0)))
1415 if (!web2->in_load)
1417 ri.needed_loads[ri.nl_size++] = web2;
1418 web2->in_load = 1;
1420 bitmap_set_bit (ri.scratch, j);
1421 ri.num_reloads--;
1424 bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
1425 BITMAP_AND_COMPL);
1428 ri.need_load = 1;
1429 emit_loads (&ri, nl_first_reload, last_block_insn);
1430 if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/)
1431 abort ();
1432 if (!insn)
1433 break;
1435 free (ri.needed_loads);
1436 sbitmap_free (ri.live);
1437 BITMAP_XFREE (ri.scratch);
1438 BITMAP_XFREE (ri.need_reload);
1441 /* WEB is a web conflicting with a spilled one. Prepare it
1442 to be able to rescan it in the next pass. Mark all its uses
1443 for checking, and clear some members of their web parts
1444 (of defs and uses). Notably don't clear the uplink. We don't
1445 change the layout of this web, just its conflicts.
1446 Also remember all IDs of its uses in USES_AS_BITMAP. */
1448 static void
1449 mark_refs_for_checking (web, uses_as_bitmap)
1450 struct web *web;
1451 bitmap uses_as_bitmap;
1453 unsigned int i;
1454 for (i = 0; i < web->num_uses; i++)
1456 unsigned int id = DF_REF_ID (web->uses[i]);
1457 SET_BIT (last_check_uses, id);
1458 bitmap_set_bit (uses_as_bitmap, id);
1459 web_parts[df->def_id + id].spanned_deaths = 0;
1460 web_parts[df->def_id + id].crosses_call = 0;
1462 for (i = 0; i < web->num_defs; i++)
1464 unsigned int id = DF_REF_ID (web->defs[i]);
1465 web_parts[id].spanned_deaths = 0;
1466 web_parts[id].crosses_call = 0;
1470 /* The last step of the spill phase is to set up the structures for
1471 incrementally rebuilding the interference graph. We break up
1472 the web part structure of all spilled webs, mark their uses for
1473 rechecking, look at their neighbors, and clean up some global
1474 information which we will rebuild. */
1476 static void
1477 detect_web_parts_to_rebuild ()
1479 bitmap uses_as_bitmap;
1480 unsigned int i, pass;
1481 struct dlist *d;
1482 sbitmap already_webs = sbitmap_alloc (num_webs);
1484 uses_as_bitmap = BITMAP_XMALLOC ();
1485 if (last_check_uses)
1486 sbitmap_free (last_check_uses);
1487 last_check_uses = sbitmap_alloc (df->use_id);
1488 sbitmap_zero (last_check_uses);
1489 sbitmap_zero (already_webs);
1490 /* We need to recheck all uses of all webs involved in spilling (and the
1491 uses added by spill insns, but those are not analyzed yet).
1492 Those are the spilled webs themselves, webs coalesced to spilled ones,
1493 and webs conflicting with any of them. */
1494 for (pass = 0; pass < 2; pass++)
1495 for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
1497 struct web *web = DLIST_WEB (d);
1498 struct conflict_link *wl;
1499 unsigned int j;
1500 /* This check is only needed for coalesced nodes, but hey. */
1501 if (alias (web)->type != SPILLED)
1502 continue;
1504 /* For the spilled web itself we also need to clear its
1505 uplink, to be able to rebuild smaller webs. After all,
1506 spilling has split the web. */
1507 for (i = 0; i < web->num_uses; i++)
1509 unsigned int id = DF_REF_ID (web->uses[i]);
1510 SET_BIT (last_check_uses, id);
1511 bitmap_set_bit (uses_as_bitmap, id);
1512 web_parts[df->def_id + id].uplink = NULL;
1513 web_parts[df->def_id + id].spanned_deaths = 0;
1514 web_parts[df->def_id + id].crosses_call = 0;
1516 for (i = 0; i < web->num_defs; i++)
1518 unsigned int id = DF_REF_ID (web->defs[i]);
1519 web_parts[id].uplink = NULL;
1520 web_parts[id].spanned_deaths = 0;
1521 web_parts[id].crosses_call = 0;
1524 /* Now look at all neighbors of this spilled web. */
1525 if (web->have_orig_conflicts)
1526 wl = web->orig_conflict_list;
1527 else
1528 wl = web->conflict_list;
1529 for (; wl; wl = wl->next)
1531 if (TEST_BIT (already_webs, wl->t->id))
1532 continue;
1533 SET_BIT (already_webs, wl->t->id);
1534 mark_refs_for_checking (wl->t, uses_as_bitmap);
1536 EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
1538 struct web *web2 = ID2WEB (j);
1539 if (TEST_BIT (already_webs, web2->id))
1540 continue;
1541 SET_BIT (already_webs, web2->id);
1542 mark_refs_for_checking (web2, uses_as_bitmap);
1546 /* We also unconditionally recheck all uses of any hardregs. This means
1547 we _can_ delete all these uses from the live_at_end[] bitmaps.
1548 And because we sometimes delete insns referring to hardregs (when
1549 they became useless because they set up a rematerializable pseudo, which
1550 then was rematerialized), some of those uses will go away with the next
1551 df_analyse(). This means we even _must_ delete those uses from
1552 the live_at_end[] bitmaps. For simplicity we simply delete
1553 all of them. */
1554 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1555 if (!fixed_regs[i])
1557 struct df_link *link;
1558 for (link = df->regs[i].uses; link; link = link->next)
1559 if (link->ref)
1560 bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
1563 /* The information in live_at_end[] will be rebuilt for all uses
1564 we recheck, so clear it here (the uses of spilled webs might
1565 indeed not become members of it again). */
1566 live_at_end -= 2;
1567 for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
1568 bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap,
1569 BITMAP_AND_COMPL);
1570 live_at_end += 2;
1572 if (rtl_dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
1574 ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
1575 dump_sbitmap_file (rtl_dump_file, last_check_uses);
1577 sbitmap_free (already_webs);
1578 BITMAP_XFREE (uses_as_bitmap);
1581 /* Statistics about deleted insns, which are useless now. */
1582 static unsigned int deleted_def_insns;
1583 static unsigned HOST_WIDE_INT deleted_def_cost;
1585 /* In rewrite_program2() we noted when a certain insn set a pseudo
1586 which wasn't live. Try to delete all those insns. */
1588 static void
1589 delete_useless_defs ()
1591 unsigned int i;
1592 /* If the insn only sets the def without any side effect (besides
1593 clobbers or uses), we can delete it. single_set() also tests
1594 for INSN_P(insn). */
1595 EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i,
1597 rtx insn = DF_REF_INSN (df->defs[i]);
1598 rtx set = single_set (insn);
1599 struct web *web = find_web_for_subweb (def2web[i]);
1600 if (set && web->type == SPILLED && web->stack_slot == NULL)
1602 deleted_def_insns++;
1603 deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
1604 PUT_CODE (insn, NOTE);
1605 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
1606 df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
1611 /* Look for spilled webs on whose behalf no insns were emitted.
1612 We invert the changed flag of the webs, so after this function
1613 a nonzero changed flag means that this web was not spillable (at least
1614 in this pass). */
1616 static void
1617 detect_non_changed_webs ()
1619 struct dlist *d, *d_next;
1620 for (d = WEBS(SPILLED); d; d = d_next)
1622 struct web *web = DLIST_WEB (d);
1623 d_next = d->next;
1624 if (!web->changed)
1626 ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
1627 web->id);
1628 remove_web_from_list (web);
1629 put_web (web, COLORED);
1630 web->changed = 1;
1632 else
1633 web->changed = 0;
1634 /* From now on web->changed is used as the opposite flag.
1635 I.e. colored webs which have the changed flag set were formerly
1636 spilled webs for which no insns were emitted. */
1640 /* Before spilling we clear the changed flags for all spilled webs. */
1642 static void
1643 reset_changed_flag ()
1645 struct dlist *d;
1646 for (d = WEBS(SPILLED); d; d = d->next)
1647 DLIST_WEB(d)->changed = 0;
1650 /* The toplevel function for this file. Given a colorized graph,
1651 and lists of spilled, coalesced and colored webs, we add some
1652 spill code. This also sets up the structures for incrementally
1653 building the interference graph in the next pass. */
1655 void
1656 actual_spill ()
1658 int i;
1659 bitmap new_deaths = BITMAP_XMALLOC ();
1660 reset_changed_flag ();
1661 spill_coalprop ();
1662 choose_spill_colors ();
1663 useless_defs = BITMAP_XMALLOC ();
1664 if (flag_ra_improved_spilling)
1665 rewrite_program2 (new_deaths);
1666 else
1667 rewrite_program (new_deaths);
1668 insert_stores (new_deaths);
1669 delete_useless_defs ();
1670 BITMAP_XFREE (useless_defs);
1671 sbitmap_free (insns_with_deaths);
1672 insns_with_deaths = sbitmap_alloc (get_max_uid ());
1673 death_insns_max_uid = get_max_uid ();
1674 sbitmap_zero (insns_with_deaths);
1675 EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i,
1676 { SET_BIT (insns_with_deaths, i);});
1677 detect_non_changed_webs ();
1678 detect_web_parts_to_rebuild ();
1679 BITMAP_XFREE (new_deaths);
1682 /* A bitmap of pseudo reg numbers which are coalesced directly
1683 to a hardreg. Set in emit_colors(), used and freed in
1684 remove_suspicious_death_notes(). */
1685 static bitmap regnos_coalesced_to_hardregs;
1687 /* Create new pseudos for each web we colored, change insns to
1688 use those pseudos and set up ra_reg_renumber. */
1690 void
1691 emit_colors (df)
1692 struct df *df;
1694 unsigned int i;
1695 int si;
1696 struct web *web;
1697 int old_max_regno = max_reg_num ();
1698 regset old_regs;
1699 basic_block bb;
1701 /* This bitmap is freed in remove_suspicious_death_notes(),
1702 which is also the user of it. */
1703 regnos_coalesced_to_hardregs = BITMAP_XMALLOC ();
1704 /* First create the (REG xx) rtx's for all webs, as we need to know
1705 the number, to make sure flow has enough memory for them in the
1706 various tables. */
1707 for (i = 0; i < num_webs - num_subwebs; i++)
1709 web = ID2WEB (i);
1710 if (web->type != COLORED && web->type != COALESCED)
1711 continue;
1712 if (web->type == COALESCED && alias (web)->type == COLORED)
1713 continue;
1714 if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER)
1715 abort ();
1717 if (web->regno >= max_normal_pseudo)
1719 rtx place;
1720 if (web->color == an_unusable_color)
1722 unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno);
1723 unsigned int total_size = MAX (inherent_size, 0);
1724 place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
1725 total_size,
1726 inherent_size == total_size ? 0 : -1);
1727 RTX_UNCHANGING_P (place) =
1728 RTX_UNCHANGING_P (regno_reg_rtx[web->regno]);
1729 set_mem_alias_set (place, new_alias_set ());
1731 else
1733 place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
1735 web->reg_rtx = place;
1737 else
1739 /* Special case for the i386 'fix_truncdi_nomemory' insn.
1740 We must choose the mode from the insns, not from PSEUDO_REGNO_MODE.
1741 This matters only for a clobbered register. */
1742 if (web->num_uses == 0 && web->num_defs == 1)
1743 web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0])));
1744 else
1745 web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
1746 /* Remember the different parts directly coalesced to a hardreg. */
1747 if (web->type == COALESCED)
1748 bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx));
1751 ra_max_regno = max_regno = max_reg_num ();
1752 allocate_reg_info (max_regno, FALSE, FALSE);
1753 ra_reg_renumber = (short *) xmalloc (max_regno * sizeof (short));
1754 for (si = 0; si < max_regno; si++)
1755 ra_reg_renumber[si] = -1;
1757 /* Then go through all references, and replace them by a new
1758 pseudoreg for each web. All uses. */
1759 /* XXX
1760 Beware: The order of replacements (first uses, then defs) matters only
1761 for read-mod-write insns, where the RTL expression for the REG is
1762 shared between def and use. For normal rmw insns we connected all such
1763 webs, i.e. both the use and the def (which are the same memory), so
1764 they get the same new pseudo-reg and order would not matter.
1765 _However_ we did not connect webs where the read cycle was an
1766 uninitialized read. If we now were to first replace the def reference
1767 and then the use ref, we would initialize it with a REG rtx, which
1768 never gets initialized and, even worse, would overwrite
1769 the definition of the other REG rtx. So we must replace the defs last.
1770 */
1771 for (i = 0; i < df->use_id; i++)
1772 if (df->uses[i])
1774 regset rs = DF_REF_BB (df->uses[i])->global_live_at_start;
1775 rtx regrtx;
1776 web = use2web[i];
1777 web = find_web_for_subweb (web);
1778 if (web->type != COLORED && web->type != COALESCED)
1779 continue;
1780 regrtx = alias (web)->reg_rtx;
1781 if (!regrtx)
1782 regrtx = web->reg_rtx;
1783 *DF_REF_REAL_LOC (df->uses[i]) = regrtx;
1784 if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
1786 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1787 SET_REGNO_REG_SET (rs, REGNO (regrtx));
1791 /* And all defs. */
1792 for (i = 0; i < df->def_id; i++)
1794 regset rs;
1795 rtx regrtx;
1796 if (!df->defs[i])
1797 continue;
1798 rs = DF_REF_BB (df->defs[i])->global_live_at_start;
1799 web = def2web[i];
1800 web = find_web_for_subweb (web);
1801 if (web->type != COLORED && web->type != COALESCED)
1802 continue;
1803 regrtx = alias (web)->reg_rtx;
1804 if (!regrtx)
1805 regrtx = web->reg_rtx;
1806 *DF_REF_REAL_LOC (df->defs[i]) = regrtx;
1807 if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
1809 /* Don't simply clear the current regno, as it might be
1810 replaced by two webs. */
1811 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1812 SET_REGNO_REG_SET (rs, REGNO (regrtx));
1816 /* And now set up the ra_reg_renumber array for reload with all the new
1817 pseudo-regs. */
1818 for (i = 0; i < num_webs - num_subwebs; i++)
1820 web = ID2WEB (i);
1821 if (web->reg_rtx && REG_P (web->reg_rtx))
1823 int r = REGNO (web->reg_rtx);
1824 ra_reg_renumber[r] = web->color;
1825 ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n",
1826 r, web->id, ra_reg_renumber[r]);
1830 old_regs = BITMAP_XMALLOC ();
1831 for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++)
1832 SET_REGNO_REG_SET (old_regs, si);
1833 FOR_EACH_BB (bb)
1835 AND_COMPL_REG_SET (bb->global_live_at_start, old_regs);
1836 AND_COMPL_REG_SET (bb->global_live_at_end, old_regs);
1838 BITMAP_XFREE (old_regs);
1841 /* Delete some coalesced moves from the insn stream. */
1843 void
1844 delete_moves ()
1846 struct move_list *ml;
1847 struct web *s, *t;
1848 /* XXX Beware: We would normally test here for each copy insn whether
1849 source and target got the same color (either by coalescing or by pure
1850 luck), and then delete it.
1851 This will currently not work. One problem is that we don't color
1852 the regs ourselves, but instead defer to reload. So the colorization
1853 is only a kind of suggestion, which reload doesn't have to follow.
1854 For webs which are coalesced to a normal colored web, we only have one
1855 new pseudo, so in this case we indeed can delete copy insns involving
1856 those (because even if reload colors them differently from our suggestion,
1857 it still has to color them the same, as only one pseudo exists). But for
1858 webs coalesced to precolored ones, we don't have a single pseudo, but
1859 instead one for each coalesced web. This means that we can't delete
1860 copy insns where source and target are webs coalesced to precolored
1861 ones, because then the connection between both webs is destroyed. Note
1862 that this means not only copy insns where one side is the precolored one
1863 itself, but also those between webs which are coalesced to one color.
1864 Also because of reload we can't delete copy insns which involve any
1865 precolored web at all. These often also have special meaning (e.g.
1866 copying the return value of a call to a pseudo, or copying a pseudo to the
1867 return register), and the deletion would confuse reload into thinking the
1868 pseudo isn't needed. One of these days reload will go away and we can
1869 do everything we want.
1870 In effect, because of the later reload, we can't base our deletion on the
1871 colors themselves, but instead need to base it on the newly created
1872 pseudos. */
1873 for (ml = wl_moves; ml; ml = ml->next)
1874 /* The real condition we would ideally use is: s->color == t->color.
1875 Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
1876 we want to prevent deletion of "special" copies. */
1877 if (ml->move
1878 && (s = alias (ml->move->source_web))->reg_rtx
1879 == (t = alias (ml->move->target_web))->reg_rtx
1880 && s->type != PRECOLORED && t->type != PRECOLORED)
1882 basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
1883 df_insn_delete (df, bb, ml->move->insn);
1884 deleted_move_insns++;
1885 deleted_move_cost += bb->frequency + 1;
1889 /* Due to reasons documented elsewhere we create different pseudos
1890 for all webs coalesced to hardregs. For these parts life_analysis()
1891 might have added REG_DEAD notes without considering that only this part
1892 but not the whole coalesced web dies. The RTL is correct, there is no
1893 coalescing yet. But if later reload's alter_reg() substitutes the
1894 hardreg into the REG rtx, it looks like that particular hardreg dies here,
1895 although (due to coalescing) it is still live. This might make different
1896 places of reload think it can use that hardreg for reload regs,
1897 accidentally overwriting it. So we need to remove those REG_DEAD notes.
1898 (Or better, teach life_analysis() and reload about our coalescing, but
1899 that comes later.) Bah. */
1901 void
1902 remove_suspicious_death_notes ()
1904 rtx insn;
1905 for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
1906 if (INSN_P (insn))
1908 rtx *pnote = &REG_NOTES (insn);
1909 while (*pnote)
1911 rtx note = *pnote;
1912 if ((REG_NOTE_KIND (note) == REG_DEAD
1913 || REG_NOTE_KIND (note) == REG_UNUSED)
1914 && (GET_CODE (XEXP (note, 0)) == REG
1915 && bitmap_bit_p (regnos_coalesced_to_hardregs,
1916 REGNO (XEXP (note, 0)))))
1917 *pnote = XEXP (note, 1);
1918 else
1919 pnote = &XEXP (*pnote, 1);
1922 BITMAP_XFREE (regnos_coalesced_to_hardregs);
1923 regnos_coalesced_to_hardregs = NULL;
1926 /* Allocate space for max_reg_num() pseudo registers, and
1927 fill reg_renumber[] from ra_reg_renumber[]. If FREE_IT
1928 is nonzero, also free ra_reg_renumber and reset ra_max_regno. */
1930 void
1931 setup_renumber (free_it)
1932 int free_it;
1934 int i;
1935 max_regno = max_reg_num ();
1936 allocate_reg_info (max_regno, FALSE, TRUE);
1937 for (i = 0; i < max_regno; i++)
1939 reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
1941 if (free_it)
1943 free (ra_reg_renumber);
1944 ra_reg_renumber = NULL;
1945 ra_max_regno = 0;
1949 /* Dump the costs and savings due to spilling, i.e. of added spill insns
1950 and removed moves or useless defs. */
1952 void
1953 dump_cost (level)
1954 unsigned int level;
1956 ra_debug_msg (level, "Instructions for spilling\n added:\n");
1957 ra_debug_msg (level, " loads =%d cost=", emitted_spill_loads);
1958 ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, spill_load_cost);
1959 ra_debug_msg (level, "\n stores=%d cost=", emitted_spill_stores);
1960 ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, spill_store_cost);
1961 ra_debug_msg (level, "\n remat =%d cost=", emitted_remat);
1962 ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, spill_remat_cost);
1963 ra_debug_msg (level, "\n removed:\n moves =%d cost=", deleted_move_insns);
1964 ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, deleted_move_cost);
1965 ra_debug_msg (level, "\n others=%d cost=", deleted_def_insns);
1966 ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, deleted_def_cost);
1967 ra_debug_msg (level, "\n");
1970 /* Initialization of the rewrite phase. */
1972 void
1973 ra_rewrite_init ()
1975 emitted_spill_loads = 0;
1976 emitted_spill_stores = 0;
1977 emitted_remat = 0;
1978 spill_load_cost = 0;
1979 spill_store_cost = 0;
1980 spill_remat_cost = 0;
1981 deleted_move_insns = 0;
1982 deleted_move_cost = 0;
1983 deleted_def_insns = 0;
1984 deleted_def_cost = 0;
1988 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: