1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "function.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "basic-block.h"
31 #include "df.h"
32 #include "expr.h"
33 #include "output.h"
34 #include "except.h"
35 #include "ra.h"
36 #include "insn-config.h"
37 #include "reload.h"
39 /* This file is part of the graph coloring register allocator, and
40 contains the functions to change the insn stream. I.e. it adds
41 spill code, rewrites insns to use the new registers after
42 coloring and deletes coalesced moves. */
44 struct rewrite_info;
45 struct rtx_list;
47 static void spill_coalescing (sbitmap, sbitmap);
48 static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap);
49 static void spill_prop_insert (struct web *, sbitmap, sbitmap);
50 static int spill_propagation (sbitmap, sbitmap, sbitmap);
51 static void spill_coalprop (void);
52 static void allocate_spill_web (struct web *);
53 static void choose_spill_colors (void);
54 static void rewrite_program (bitmap);
55 static void remember_slot (struct rtx_list **, rtx);
56 static int slots_overlap_p (rtx, rtx);
57 static void delete_overlapping_slots (struct rtx_list **, rtx);
58 static int slot_member_p (struct rtx_list *, rtx);
59 static void insert_stores (bitmap);
60 static int spill_same_color_p (struct web *, struct web *);
61 static bool is_partly_live_1 (sbitmap, struct web *);
62 static void update_spill_colors (HARD_REG_SET *, struct web *, int);
63 static int spill_is_free (HARD_REG_SET *, struct web *);
64 static void emit_loads (struct rewrite_info *, int, rtx);
65 static void reloads_to_loads (struct rewrite_info *, struct ref **,
66 unsigned int, struct web **);
67 static void rewrite_program2 (bitmap);
68 static void mark_refs_for_checking (struct web *, bitmap);
69 static void detect_web_parts_to_rebuild (void);
70 static void delete_useless_defs (void);
71 static void detect_non_changed_webs (void);
72 static void reset_changed_flag (void);
74 /* For tracking some statistics, we count the number (and cost)
75 of deleted move insns. */
76 static unsigned int deleted_move_insns;
77 static unsigned HOST_WIDE_INT deleted_move_cost;
79 /* This is the spill coalescing phase. In SPILLED the IDs of all
80 already spilled webs are noted. In COALESCE the IDs of the webs still
81 to be checked for coalescing are noted. This tries to coalesce two webs
82 which were spilled, are connected by a move, and don't conflict. This greatly
83 reduces memory shuffling. */
85 static void
86 spill_coalescing (sbitmap coalesce, sbitmap spilled)
88 struct move_list *ml;
89 struct move *m;
90 for (ml = wl_moves; ml; ml = ml->next)
91 if ((m = ml->move) != NULL)
93 struct web *s = alias (m->source_web);
94 struct web *t = alias (m->target_web);
95 if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id))
96 || (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id)))
98 struct conflict_link *wl;
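/* sup_igraph is the conflict matrix of the super webs: a num_webs x num_webs
   bitmap in which bit S*num_webs+T is set when web S conflicts with web T,
   hence both orders are checked below before coalescing.  */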
99 if (TEST_BIT (sup_igraph, s->id * num_webs + t->id)
100 || TEST_BIT (sup_igraph, t->id * num_webs + s->id)
101 || s->pattern || t->pattern)
102 continue;
104 deleted_move_insns++;
105 deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1;
106 PUT_CODE (m->insn, NOTE);
107 NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED;
108 df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn);
110 m->target_web->target_of_spilled_move = 1;
111 if (s == t)
112 /* Maybe already coalesced due to a former move. */
113 continue;
114 /* Merge the nodes S and T in the I-graph. Beware: the merging
115 of conflicts relies on the fact that in the conflict list
116 of T all of its conflicts are noted. This is currently not
117 the case if T is the target of a coalesced web, because
118 then (in combine () above) only those conflicts were noted in
119 T from the web which was coalesced into T, which at the time
120 of combine() were not already on the SELECT stack or were
121 themselves coalesced to something else. */
122 gcc_assert (t->type == SPILLED
123 && s->type == SPILLED);
124 remove_list (t->dlink, &WEBS(SPILLED));
125 put_web (t, COALESCED);
126 t->alias = s;
127 s->is_coalesced = 1;
128 t->is_coalesced = 1;
129 merge_moves (s, t);
130 for (wl = t->conflict_list; wl; wl = wl->next)
132 struct web *pweb = wl->t;
133 if (wl->sub == NULL)
134 record_conflict (s, pweb);
135 else
137 struct sub_conflict *sl;
138 for (sl = wl->sub; sl; sl = sl->next)
140 struct web *sweb = NULL;
141 if (SUBWEB_P (sl->s))
142 sweb = find_subweb (s, sl->s->orig_x);
143 if (!sweb)
144 sweb = s;
145 record_conflict (sweb, sl->t);
148 /* No decrement_degree here, because we already have colored
149 the graph, and don't want to insert pweb into any other
150 list. */
151 pweb->num_conflicts -= 1 + t->add_hardregs;
157 /* Returns the probable savings of coalescing WEB with webs from
158 SPILLED, in terms of removed move insn cost. */
160 static unsigned HOST_WIDE_INT
161 spill_prop_savings (struct web *web, sbitmap spilled)
163 unsigned HOST_WIDE_INT savings = 0;
164 struct move_list *ml;
165 struct move *m;
166 unsigned int cost;
167 if (web->pattern)
168 return 0;
169 cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1);
170 cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0);
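/* COST estimates one load from and one store to the stack slot for WEB's
   mode and class.  Each move that connects WEB to an already spilled,
   non-conflicting web could be removed by spill coalescing, saving roughly
   COST per execution, hence the weighting by block frequency below.  */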
171 for (ml = wl_moves; ml; ml = ml->next)
172 if ((m = ml->move) != NULL)
174 struct web *s = alias (m->source_web);
175 struct web *t = alias (m->target_web);
176 if (s != web)
178 struct web *h = s;
179 s = t;
180 t = h;
182 if (s != web || !TEST_BIT (spilled, t->id) || t->pattern
183 || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
184 || TEST_BIT (sup_igraph, t->id * num_webs + s->id))
185 continue;
186 savings += BLOCK_FOR_INSN (m->insn)->frequency * cost;
188 return savings;
191 /* This adds to LIST and PROCESSED the IDs of all colored webs which
192 are connected to WEB by a move. */
194 static void
195 spill_prop_insert (struct web *web, sbitmap list, sbitmap processed)
197 struct move_list *ml;
198 struct move *m;
199 for (ml = wl_moves; ml; ml = ml->next)
200 if ((m = ml->move) != NULL)
202 struct web *s = alias (m->source_web);
203 struct web *t = alias (m->target_web);
204 if (s != web)
206 struct web *h = s;
207 s = t;
208 t = h;
210 if (s != web || t->type != COLORED || TEST_BIT (processed, t->id))
211 continue;
212 SET_BIT (list, t->id);
213 SET_BIT (processed, t->id);
217 /* The spill propagation pass. If we have two spilled webs, the first
218 connected through a move to a colored one, and the second also connected
219 to that colored one, and this colored web is only used to connect both
220 spilled webs, it might be worthwhile to spill that colored one.
221 This is the case if the cost of the removed copy insns (all three webs
222 could be placed into the same stack slot) is higher than the spill cost
223 of the web.
224 TO_PROP are the webs we try to propagate from (i.e. spilled ones),
225 SPILLED the set of all spilled webs so far and PROCESSED the set
226 of all webs processed so far, so we don't do work twice. */
228 static int
229 spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed)
231 int id;
232 int again = 0;
233 sbitmap list = sbitmap_alloc (num_webs);
234 sbitmap_zero (list);
236 /* First insert colored move neighbors into the candidate list. */
237 EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id,
239 spill_prop_insert (ID2WEB (id), list, processed);
241 sbitmap_zero (to_prop);
243 /* For each candidate, see if the savings are higher than its
244 spill cost. */
245 while ((id = sbitmap_first_set_bit (list)) >= 0)
247 struct web *web = ID2WEB (id);
248 RESET_BIT (list, id);
249 if (spill_prop_savings (web, spilled) >= web->spill_cost)
251 /* If so, we found a new spilled web. Insert its colored
252 move neighbors again, and mark that we need to repeat the
253 whole main loop of spill propagation/coalescing again. */
254 remove_web_from_list (web);
255 web->color = -1;
256 put_web (web, SPILLED);
257 SET_BIT (spilled, id);
258 SET_BIT (to_prop, id);
259 spill_prop_insert (web, list, processed);
260 again = 1;
263 sbitmap_free (list);
264 return again;
267 /* The main phase to improve spill costs. This repeatedly runs
268 spill coalescing and spill propagation, until nothing changes. */
270 static void
271 spill_coalprop (void)
273 sbitmap spilled, processed, to_prop;
274 struct dlist *d;
275 int again;
276 spilled = sbitmap_alloc (num_webs);
277 processed = sbitmap_alloc (num_webs);
278 to_prop = sbitmap_alloc (num_webs);
279 sbitmap_zero (spilled);
280 for (d = WEBS(SPILLED); d; d = d->next)
281 SET_BIT (spilled, DLIST_WEB (d)->id);
282 sbitmap_copy (to_prop, spilled);
283 sbitmap_zero (processed);
286 spill_coalescing (to_prop, spilled);
287 /* XXX Currently (with optimistic coalescing) spill_propagation()
288 doesn't give better code, sometimes it gives worse (but not by much)
289 code. I believe this is because of slightly wrong cost
290 measurements. Anyway right now it isn't worth the time it takes,
291 so deactivate it for now. */
292 again = 0 && spill_propagation (to_prop, spilled, processed);
294 while (again);
295 sbitmap_free (to_prop);
296 sbitmap_free (processed);
297 sbitmap_free (spilled);
300 /* Allocate a spill slot for a WEB. Currently we spill to pseudo
301 registers, to be able to also track webs for "stack slots", and also
302 to possibly colorize them. These pseudos are sometimes handled
303 in a special way, where we know that they can also represent
304 MEM references. */
306 static void
307 allocate_spill_web (struct web *web)
309 int regno = web->regno;
310 rtx slot;
311 if (web->stack_slot)
312 return;
313 slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno));
314 web->stack_slot = slot;
317 /* This chooses a color for all SPILLED webs for interference region
318 spilling. The heuristic isn't good in any way. */
320 static void
321 choose_spill_colors (void)
323 struct dlist *d;
324 unsigned HOST_WIDE_INT *costs = xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
325 for (d = WEBS(SPILLED); d; d = d->next)
327 struct web *web = DLIST_WEB (d);
328 struct conflict_link *wl;
329 int bestc, c;
330 HARD_REG_SET avail;
331 memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
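/* Accumulate, for every hardreg, the spill cost of the colored and
   precolored neighbors occupying it; a low costs[c] means that choosing
   color C for this spilled web would interfere with as little as possible.  */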
332 for (wl = web->conflict_list; wl; wl = wl->next)
334 struct web *pweb = wl->t;
335 if (pweb->type == COLORED || pweb->type == PRECOLORED)
336 costs[pweb->color] += pweb->spill_cost;
339 COPY_HARD_REG_SET (avail, web->usable_regs);
340 if (web->crosses_call)
342 /* Add an arbitrary constant cost to colors not usable by
343 call-crossing webs without saves/loads. */
344 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
345 if (TEST_HARD_REG_BIT (call_used_reg_set, c))
346 costs[c] += 1000;
348 bestc = -1;
349 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
350 if ((bestc < 0 || costs[bestc] > costs[c])
351 && TEST_HARD_REG_BIT (avail, c)
352 && HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
354 int i, size;
355 size = hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
356 for (i = 1; i < size
357 && TEST_HARD_REG_BIT (avail, c + i); i++);
358 if (i == size)
359 bestc = c;
361 web->color = bestc;
362 ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n",
363 bestc, web->id);
366 free (costs);
369 /* For statistics' sake we count the number and cost of all new loads,
370 stores and emitted rematerializations. */
371 static unsigned int emitted_spill_loads;
372 static unsigned int emitted_spill_stores;
373 static unsigned int emitted_remat;
374 static unsigned HOST_WIDE_INT spill_load_cost;
375 static unsigned HOST_WIDE_INT spill_store_cost;
376 static unsigned HOST_WIDE_INT spill_remat_cost;
378 /* In rewrite_program2() we detect if some def is useless, in the sense
379 that the pseudo set is not live anymore at that point. The REF_IDs
380 of such defs are noted here. */
381 static bitmap useless_defs;
383 /* This is the simple and fast version of rewriting the program to
384 include spill code. It spills at every insn containing spilled
385 defs or uses. Loads are added only if flag_ra_spill_every_use is
386 nonzero, otherwise only stores will be added. This doesn't
387 support rematerialization.
388 NEW_DEATHS is filled with the uids of insns which probably contain
389 deaths. */
391 static void
392 rewrite_program (bitmap new_deaths)
394 unsigned int i;
395 struct dlist *d;
396 bitmap b = BITMAP_XMALLOC ();
398 /* We walk over all webs, over all uses/defs. Of all webs, we only need
399 to look at spilled webs, and webs coalesced to spilled ones, in case
400 their alias isn't broken up, or they got spill coalesced. */
401 for (i = 0; i < 2; i++)
402 for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
404 struct web *web = DLIST_WEB (d);
405 struct web *aweb = alias (web);
406 unsigned int j;
407 rtx slot;
409 /* Is trivially true for spilled webs, but not for coalesced ones. */
410 if (aweb->type != SPILLED)
411 continue;
413 /* First add loads before every use, if we have to. */
414 if (flag_ra_spill_every_use)
416 bitmap_clear (b);
417 allocate_spill_web (aweb);
418 slot = aweb->stack_slot;
419 for (j = 0; j < web->num_uses; j++)
421 rtx insns, target, source;
422 rtx insn = DF_REF_INSN (web->uses[j]);
423 rtx prev = PREV_INSN (insn);
424 basic_block bb = BLOCK_FOR_INSN (insn);
425 /* Happens when spill_coalescing() deletes move insns. */
426 if (!INSN_P (insn))
427 continue;
429 /* Check that we didn't already add a load for this web
430 and insn. Happens when an insn uses the same web
431 multiple times. */
432 if (bitmap_bit_p (b, INSN_UID (insn)))
433 continue;
434 bitmap_set_bit (b, INSN_UID (insn));
435 target = DF_REF_REG (web->uses[j]);
436 source = slot;
437 start_sequence ();
438 if (GET_CODE (target) == SUBREG)
439 source = simplify_gen_subreg (GET_MODE (target), source,
440 GET_MODE (source),
441 SUBREG_BYTE (target));
442 ra_emit_move_insn (target, source);
443 insns = get_insns ();
444 end_sequence ();
445 emit_insn_before (insns, insn);
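/* The insns just emitted are not yet recorded in the CFG or in df:
   adjust BB_HEAD if we emitted before the first insn of the block, and
   register each new insn with its basic block and the df information.  */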
447 if (BB_HEAD (bb) == insn)
448 BB_HEAD (bb) = NEXT_INSN (prev);
449 for (insn = PREV_INSN (insn); insn != prev;
450 insn = PREV_INSN (insn))
452 set_block_for_insn (insn, bb);
453 df_insn_modify (df, bb, insn);
456 emitted_spill_loads++;
457 spill_load_cost += bb->frequency + 1;
461 /* Now emit the stores after each def.
462 If any uses were loaded from stackslots (compared to
463 rematerialized or not reloaded due to IR spilling),
464 aweb->stack_slot will be set. If not, we don't need to emit
465 any stack stores. */
466 slot = aweb->stack_slot;
467 bitmap_clear (b);
468 if (slot)
469 for (j = 0; j < web->num_defs; j++)
471 rtx insns, source, dest;
472 rtx insn = DF_REF_INSN (web->defs[j]);
473 rtx following = NEXT_INSN (insn);
474 basic_block bb = BLOCK_FOR_INSN (insn);
475 /* Happens when spill_coalescing() deletes move insns. */
476 if (!INSN_P (insn))
477 continue;
478 if (bitmap_bit_p (b, INSN_UID (insn)))
479 continue;
480 bitmap_set_bit (b, INSN_UID (insn));
481 start_sequence ();
482 source = DF_REF_REG (web->defs[j]);
483 dest = slot;
484 if (GET_CODE (source) == SUBREG)
485 dest = simplify_gen_subreg (GET_MODE (source), dest,
486 GET_MODE (dest),
487 SUBREG_BYTE (source));
488 ra_emit_move_insn (dest, source);
490 insns = get_insns ();
491 end_sequence ();
492 if (insns)
494 emit_insn_after (insns, insn);
495 if (BB_END (bb) == insn)
496 BB_END (bb) = PREV_INSN (following);
497 for (insn = insns; insn != following; insn = NEXT_INSN (insn))
499 set_block_for_insn (insn, bb);
500 df_insn_modify (df, bb, insn);
503 else
504 df_insn_modify (df, bb, insn);
505 emitted_spill_stores++;
506 spill_store_cost += bb->frequency + 1;
507 /* XXX we should set new_deaths for all inserted stores
508 whose pseudo dies here.
509 Note that this isn't the case for _all_ stores. */
510 /* I.e. the next statement is wrong, and might cause some spilltemps
511 to be categorized as spilltemp2's (i.e. live over a death),
512 although they aren't. This might make them spill again,
513 which causes an endless loop in case this insn is in fact
514 _not_ a death. */
515 bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
519 BITMAP_XFREE (b);
522 /* A simple list of rtx's. */
523 struct rtx_list
525 struct rtx_list *next;
526 rtx x;
529 /* Adds X to *LIST. */
531 static void
532 remember_slot (struct rtx_list **list, rtx x)
534 struct rtx_list *l;
535 /* PRE: X is not already in LIST. */
536 l = ra_alloc (sizeof (*l));
537 l->next = *list;
538 l->x = x;
539 *list = l;
542 /* Given two rtx's S1 and S2, either being REGs or MEMs (or SUBREGs
543 thereof), return nonzero if they overlap. A REG and a MEM don't
544 overlap, and if both are MEMs they must have an easy address
545 (plus (basereg) (const_int x)), otherwise they overlap. */
547 static int
548 slots_overlap_p (rtx s1, rtx s2)
550 rtx base1, base2;
551 HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
552 int size1 = GET_MODE_SIZE (GET_MODE (s1));
553 int size2 = GET_MODE_SIZE (GET_MODE (s2));
554 if (GET_CODE (s1) == SUBREG)
555 ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1);
556 if (GET_CODE (s2) == SUBREG)
557 ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2);
559 if (s1 == s2)
560 return 1;
562 if (GET_CODE (s1) != GET_CODE (s2))
563 return 0;
565 if (REG_P (s1) && REG_P (s2))
567 if (REGNO (s1) != REGNO (s2))
568 return 0;
569 if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
570 return 0;
571 return 1;
573 gcc_assert (MEM_P (s1) && GET_CODE (s2) == MEM);
574 s1 = XEXP (s1, 0);
575 s2 = XEXP (s2, 0);
576 if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0))
577 || GET_CODE (XEXP (s1, 1)) != CONST_INT)
578 return 1;
579 if (GET_CODE (s2) != PLUS || !REG_P (XEXP (s2, 0))
580 || GET_CODE (XEXP (s2, 1)) != CONST_INT)
581 return 1;
582 base1 = XEXP (s1, 0);
583 base2 = XEXP (s2, 0);
584 if (!rtx_equal_p (base1, base2))
585 return 1;
586 ofs1 += INTVAL (XEXP (s1, 1));
587 ofs2 += INTVAL (XEXP (s2, 1));
588 if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
589 return 0;
590 return 1;
593 /* This deletes from *LIST all rtx's which overlap with X in the sense
594 of slots_overlap_p(). */
596 static void
597 delete_overlapping_slots (struct rtx_list **list, rtx x)
599 while (*list)
601 if (slots_overlap_p ((*list)->x, x))
602 *list = (*list)->next;
603 else
604 list = &((*list)->next);
608 /* Returns nonzero if X is a member of LIST. */
610 static int
611 slot_member_p (struct rtx_list *list, rtx x)
613 for (;list; list = list->next)
614 if (rtx_equal_p (list->x, x))
615 return 1;
616 return 0;
619 /* A more sophisticated (and slower) method of adding the stores than
620 rewrite_program(). This goes backward through the insn stream, adding
621 stores as it goes, but only if it hasn't just added a store to the
622 same location. NEW_DEATHS is a bitmap filled with uids of insns
623 containing deaths. */
625 static void
626 insert_stores (bitmap new_deaths)
628 rtx insn;
629 rtx last_slot = NULL_RTX;
630 struct rtx_list *slots = NULL;
632 /* We go simply backwards over basic block borders. */
633 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
635 int uid = INSN_UID (insn);
637 /* If we reach a basic block border, which has more than one
638 outgoing edge, we simply forget all already emitted stores. */
639 if (BARRIER_P (insn)
640 || JUMP_P (insn) || can_throw_internal (insn))
642 last_slot = NULL_RTX;
643 slots = NULL;
645 if (!INSN_P (insn))
646 continue;
648 /* If this insn was not just added in this pass. */
649 if (uid < insn_df_max_uid)
651 unsigned int n;
652 rtx following = NEXT_INSN (insn);
653 basic_block bb = BLOCK_FOR_INSN (insn);
654 struct ra_insn_info info;
656 info = insn_df[uid];
657 for (n = 0; n < info.num_defs; n++)
659 struct web *web = def2web[DF_REF_ID (info.defs[n])];
660 struct web *aweb = alias (find_web_for_subweb (web));
661 rtx slot, source;
662 if (aweb->type != SPILLED || !aweb->stack_slot)
663 continue;
664 slot = aweb->stack_slot;
665 source = DF_REF_REG (info.defs[n]);
666 /* adjust_address() might generate code. */
667 start_sequence ();
668 if (GET_CODE (source) == SUBREG)
669 slot = simplify_gen_subreg (GET_MODE (source), slot,
670 GET_MODE (slot),
671 SUBREG_BYTE (source));
672 /* If we have no info about emitted stores, or it didn't
673 contain the location we intend to use soon, then
674 add the store. */
675 if ((!last_slot || !rtx_equal_p (slot, last_slot))
676 && ! slot_member_p (slots, slot))
678 rtx insns, ni;
679 last_slot = slot;
680 remember_slot (&slots, slot);
681 ra_emit_move_insn (slot, source);
682 insns = get_insns ();
683 end_sequence ();
684 if (insns)
686 emit_insn_after (insns, insn);
687 if (BB_END (bb) == insn)
688 BB_END (bb) = PREV_INSN (following);
689 for (ni = insns; ni != following; ni = NEXT_INSN (ni))
691 set_block_for_insn (ni, bb);
692 df_insn_modify (df, bb, ni);
695 else
696 df_insn_modify (df, bb, insn);
697 emitted_spill_stores++;
698 spill_store_cost += bb->frequency + 1;
699 bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
701 else
703 /* Otherwise ignore insns from adjust_address() above. */
704 end_sequence ();
708 /* If we look at a load generated by the allocator, forget
709 the last emitted slot, and additionally clear all slots
710 overlapping its source (after all, we need it again). */
711 /* XXX If we emit the stack-ref directly into the using insn the
712 following needs a change, because that is no new insn. Preferably
713 we would add some notes to the insn, what stackslots are needed
714 for it. */
715 if (uid >= last_max_uid)
717 rtx set = single_set (insn);
718 last_slot = NULL_RTX;
719 /* If this was no simple set, give up, and forget everything. */
720 if (!set)
721 slots = NULL;
722 else
724 if (1 || MEM_P (SET_SRC (set)))
725 delete_overlapping_slots (&slots, SET_SRC (set));
731 /* Returns 1 if both colored webs have some hardregs in common, even if
732 they are not the same width. */
734 static int
735 spill_same_color_p (struct web *web1, struct web *web2)
737 int c1, size1, c2, size2;
738 if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
739 return 0;
740 if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color)
741 return 0;
743 size1 = web1->type == PRECOLORED
744 ? 1 : hard_regno_nregs[c1][PSEUDO_REGNO_MODE (web1->regno)];
745 size2 = web2->type == PRECOLORED
746 ? 1 : hard_regno_nregs[c2][PSEUDO_REGNO_MODE (web2->regno)];
747 if (c1 >= c2 + size2 || c2 >= c1 + size1)
748 return 0;
749 return 1;
752 /* Given the set of live web IDs LIVE, returns nonzero if any of WEB's
753 subwebs (or WEB itself) is live. */
755 static bool
756 is_partly_live_1 (sbitmap live, struct web *web)
758 do
759 if (TEST_BIT (live, web->id))
760 return 1;
761 while ((web = web->subreg_next));
762 return 0;
765 /* Fast version in case WEB has no subwebs. */
766 #define is_partly_live(live, web) ((!web->subreg_next) \
767 ? TEST_BIT (live, web->id) \
768 : is_partly_live_1 (live, web))
770 /* Change the set of currently IN_USE colors according to
771 WEB's color. Either add those colors to the hardreg set (if ADD
772 is nonzero), or remove them. */
774 static void
775 update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
777 int c, size;
778 if ((c = alias (find_web_for_subweb (web))->color) < 0
779 || c == an_unusable_color)
780 return;
781 size = hard_regno_nregs[c][GET_MODE (web->orig_x)];
782 if (SUBWEB_P (web))
784 c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
785 SUBREG_BYTE (web->orig_x),
786 GET_MODE (web->orig_x));
788 else if (web->type == PRECOLORED)
789 size = 1;
790 if (add)
791 for (; size--;)
792 SET_HARD_REG_BIT (*in_use, c + size);
793 else
794 for (; size--;)
795 CLEAR_HARD_REG_BIT (*in_use, c + size);
798 /* Given a set of hardregs currently IN_USE and the color C of WEB,
799 return -1 if WEB has no color, 1 if it has the unusable color,
800 0 if one of its used hardregs is in use, and 1 otherwise.
801 Generally, if WEB can't be left colorized return 1. */
803 static int
804 spill_is_free (HARD_REG_SET *in_use, struct web *web)
806 int c, size;
807 if ((c = alias (web)->color) < 0)
808 return -1;
809 if (c == an_unusable_color)
810 return 1;
811 size = web->type == PRECOLORED
812 ? 1 : hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
813 for (; size--;)
814 if (TEST_HARD_REG_BIT (*in_use, c + size))
815 return 0;
816 return 1;
820 /* Structure for passing between rewrite_program2() and emit_loads(). */
821 struct rewrite_info
823 /* The web IDs which currently would need a reload. These are
824 currently live spilled webs, whose color was still free. */
825 bitmap need_reload;
826 /* We need a scratch bitmap, but don't want to allocate one a zillion
827 times. */
828 bitmap scratch;
829 /* Web IDs of currently live webs. These are the precise IDs,
830 not just those of the superwebs. If only one part is live, only
831 that ID is placed here. */
832 sbitmap live;
833 /* An array of webs, which currently need a load added.
834 They will be emitted when seeing the first death. */
835 struct web **needed_loads;
836 /* The current number of entries in needed_loads. */
837 int nl_size;
838 /* The number of bits set in need_reload. */
839 int num_reloads;
840 /* The current set of hardregs not available. */
841 HARD_REG_SET colors_in_use;
842 /* Nonzero, if we just added some spill temps to need_reload or
843 needed_loads. In this case we don't wait for the next death
844 to emit their loads. */
845 int any_spilltemps_spilled;
846 /* Nonzero, if we currently need to emit the loads. E.g. when we
847 saw an insn containing deaths. */
848 int need_load;
851 /* The needed_loads list of RI contains some webs for which
852 we add the actual load insns here. They are added just before
853 their last seen use. NL_FIRST_RELOAD is the index of the first
854 load which is a converted reload, all other entries are normal
855 loads. LAST_BLOCK_INSN is the last insn of the current basic block. */
857 static void
858 emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
860 int j;
861 for (j = ri->nl_size; j;)
863 struct web *web = ri->needed_loads[--j];
864 struct web *supweb;
865 struct web *aweb;
866 rtx ni, slot, reg;
867 rtx before = NULL_RTX, after = NULL_RTX;
868 basic_block bb;
869 /* When spilltemps were spilled for the last insns, their
870 loads are already emitted, which is noted by setting
871 needed_loads[] for them to 0. */
872 if (!web)
873 continue;
874 supweb = find_web_for_subweb (web);
875 gcc_assert (supweb->regno < max_normal_pseudo);
876 /* Check for web being a spilltemp, if we only want to
877 load spilltemps. Also remember that we emitted that
878 load, which we don't need to do when we have a death,
879 because then all of needed_loads[] is emptied. */
880 if (!ri->need_load)
882 if (!supweb->spill_temp)
883 continue;
884 else
885 ri->needed_loads[j] = 0;
887 web->in_load = 0;
888 /* The adding of reloads doesn't depend on liveness. */
889 if (j < nl_first_reload && !TEST_BIT (ri->live, web->id))
890 continue;
891 aweb = alias (supweb);
892 aweb->changed = 1;
893 start_sequence ();
894 if (supweb->pattern)
896 /* XXX If we later allow non-constant sources for rematerialization
897 we must also disallow coalescing _to_ rematerialized webs
898 (at least then disallow spilling them, which we already ensure
899 when flag_ra_break_aliases), or not take the pattern but a
900 stackslot. */
901 gcc_assert (aweb == supweb);
902 slot = copy_rtx (supweb->pattern);
903 reg = copy_rtx (supweb->orig_x);
904 /* Sanity check. orig_x should be a REG rtx, which should be
905 shared over all RTL, so copy_rtx should have no effect. */
906 gcc_assert (reg == supweb->orig_x);
908 else
910 allocate_spill_web (aweb);
911 slot = aweb->stack_slot;
913 /* If we don't copy the RTL there might be some SUBREG
914 rtx shared in the next iteration although being in
915 different webs, which leads to wrong code. */
916 reg = copy_rtx (web->orig_x);
917 if (GET_CODE (reg) == SUBREG)
918 /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
919 (reg));*/
920 slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot),
921 SUBREG_BYTE (reg));
923 ra_emit_move_insn (reg, slot);
924 ni = get_insns ();
925 end_sequence ();
926 before = web->last_use_insn;
927 web->last_use_insn = NULL_RTX;
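/* No last use of this web was seen in the current block, so place the
   load at the end of the block: before a trailing jump, otherwise after
   the last insn.  */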
928 if (!before)
930 if (JUMP_P (last_block_insn))
931 before = last_block_insn;
932 else
933 after = last_block_insn;
935 if (after)
937 rtx foll = NEXT_INSN (after);
938 bb = BLOCK_FOR_INSN (after);
939 emit_insn_after (ni, after);
940 if (BB_END (bb) == after)
941 BB_END (bb) = PREV_INSN (foll);
942 for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
944 set_block_for_insn (ni, bb);
945 df_insn_modify (df, bb, ni);
948 else
950 rtx prev = PREV_INSN (before);
951 bb = BLOCK_FOR_INSN (before);
952 emit_insn_before (ni, before);
953 if (BB_HEAD (bb) == before)
954 BB_HEAD (bb) = NEXT_INSN (prev);
955 for (; ni != before; ni = NEXT_INSN (ni))
957 set_block_for_insn (ni, bb);
958 df_insn_modify (df, bb, ni);
961 if (supweb->pattern)
963 emitted_remat++;
964 spill_remat_cost += bb->frequency + 1;
966 else
968 emitted_spill_loads++;
969 spill_load_cost += bb->frequency + 1;
971 RESET_BIT (ri->live, web->id);
972 /* In the special case documented above only emit the reloads and
973 one load. */
974 if (ri->need_load == 2 && j < nl_first_reload)
975 break;
977 if (ri->need_load)
978 ri->nl_size = j;
981 /* Given a set of reloads in RI, an array of NUM_REFS references (either
982 uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
983 (either use2web or def2web), convert some reloads to loads.
984 This looks at the webs referenced, and how they change the set of
985 available colors. Now put all still live webs, which needed reloads,
986 and whose colors aren't free anymore, on the needed_loads list. */
988 static void
989 reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
990 unsigned int num_refs, struct web **ref2web)
992 unsigned int n;
993 int num_reloads = ri->num_reloads;
994 for (n = 0; n < num_refs && num_reloads; n++)
996 struct web *web = ref2web[DF_REF_ID (refs[n])];
997 struct web *supweb = find_web_for_subweb (web);
998 int is_death;
999 int j;
1000 /* Only emit reloads when entering their interference
1001 region. A use of a spilled web never opens an
1002 interference region, independent of its color. */
1003 if (alias (supweb)->type == SPILLED)
1004 continue;
1005 if (supweb->type == PRECOLORED
1006 && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
1007 continue;
1008 /* Note that if web (and supweb) are DEFs, we already cleared
1009 the corresponding bits in live. I.e. is_death becomes true, which
1010 is what we want. */
1011 is_death = !TEST_BIT (ri->live, supweb->id);
1012 is_death &= !TEST_BIT (ri->live, web->id);
1013 if (is_death)
1015 int old_num_r = num_reloads;
1016 bitmap_iterator bi;
1018 bitmap_clear (ri->scratch);
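/* This reference is a death (we are walking backwards).  Any pending
   reload whose web has an overlapping color can no longer stay a reload
   and must become a real load here: queue it on needed_loads, note it in
   SCRATCH, and strip it from need_reload below.  */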
1019 EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j, bi)
1021 struct web *web2 = ID2WEB (j);
1022 struct web *aweb2 = alias (find_web_for_subweb (web2));
1023 gcc_assert (spill_is_free (&(ri->colors_in_use), aweb2) != 0);
1024 if (spill_same_color_p (supweb, aweb2)
1025 /* && interfere (web, web2) */)
1027 if (!web2->in_load)
1029 ri->needed_loads[ri->nl_size++] = web2;
1030 web2->in_load = 1;
1032 bitmap_set_bit (ri->scratch, j);
1033 num_reloads--;
1036 if (num_reloads != old_num_r)
1037 bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch,
1038 BITMAP_AND_COMPL);
1041 ri->num_reloads = num_reloads;
1044 /* This adds loads for spilled webs to the program. It uses a kind of
1045 interference region spilling. If flag_ra_ir_spilling is zero it
1046 only uses improved chaitin spilling (adding loads only at insns
1047 containing deaths). */
1049 static void
1050 rewrite_program2 (bitmap new_deaths)
1052 basic_block bb = NULL;
1053 int nl_first_reload;
1054 struct rewrite_info ri;
1055 rtx insn;
1056 ri.needed_loads = xmalloc (num_webs * sizeof (struct web *));
1057 ri.need_reload = BITMAP_XMALLOC ();
1058 ri.scratch = BITMAP_XMALLOC ();
1059 ri.live = sbitmap_alloc (num_webs);
1060 ri.nl_size = 0;
1061 ri.num_reloads = 0;
1062 for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
1064 basic_block last_bb = NULL;
1065 rtx last_block_insn;
1066 int i, j;
1067 bitmap_iterator bi;
1069 if (!INSN_P (insn))
1070 insn = prev_real_insn (insn);
1071 while (insn && !(bb = BLOCK_FOR_INSN (insn)))
1072 insn = prev_real_insn (insn);
1073 if (!insn)
1074 break;
1075 i = bb->index + 2;
1076 last_block_insn = insn;
1078 sbitmap_zero (ri.live);
1079 CLEAR_HARD_REG_SET (ri.colors_in_use);
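/* Seed the live set and the occupied hardregs from everything that is
   live at the end of this basic block.  */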
1080 EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j, bi)
1082 struct web *web = use2web[j];
1083 struct web *aweb = alias (find_web_for_subweb (web));
1084 /* A web is only live at end if it isn't spilled. If we didn't
1085 check this, the last uses of spilled webs per basic block
1086 wouldn't be detected as deaths, although they are in the final
1087 code. This would lead to accumulating many loads without need,
1088 only increasing register pressure. */
1089 /* XXX do add also spilled webs which got a color for IR spilling.
1090 Remember to not add to colors_in_use in that case. */
1091 if (aweb->type != SPILLED /*|| aweb->color >= 0*/)
1093 SET_BIT (ri.live, web->id);
1094 if (aweb->type != SPILLED)
1095 update_spill_colors (&(ri.colors_in_use), web, 1);
1099 bitmap_clear (ri.need_reload);
1100 ri.num_reloads = 0;
1101 ri.any_spilltemps_spilled = 0;
1102 if (flag_ra_ir_spilling)
1104 struct dlist *d;
1105 int pass;
1106 /* XXX If we don't add spilled nodes into live above, the following
1107 becomes an empty loop. */
1108 for (pass = 0; pass < 2; pass++)
1109 for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
1111 struct web *web = DLIST_WEB (d);
1112 struct web *aweb = alias (web);
1113 if (aweb->type != SPILLED)
1114 continue;
1115 if (is_partly_live (ri.live, web)
1116 && spill_is_free (&(ri.colors_in_use), web) > 0)
1118 ri.num_reloads++;
1119 bitmap_set_bit (ri.need_reload, web->id);
1120 /* Last using insn is somewhere in another block. */
1121 web->last_use_insn = NULL_RTX;
1126 last_bb = bb;
1127 for (; insn; insn = PREV_INSN (insn))
1129 struct ra_insn_info info;
1130 unsigned int n;
1132 memset (&info, 0, sizeof info);
1134 if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
1136 int index = BLOCK_FOR_INSN (insn)->index + 2;
1137 bitmap_iterator bi;
1139 EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j, bi)
1141 struct web *web = use2web[j];
1142 struct web *aweb = alias (find_web_for_subweb (web));
1143 if (aweb->type != SPILLED)
1145 SET_BIT (ri.live, web->id);
1146 update_spill_colors (&(ri.colors_in_use), web, 1);
1149 bitmap_clear (ri.scratch);
1150 EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, bi)
1152 struct web *web2 = ID2WEB (j);
1153 struct web *supweb2 = find_web_for_subweb (web2);
1154 struct web *aweb2 = alias (supweb2);
1155 if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0)
1157 if (!web2->in_load)
1159 ri.needed_loads[ri.nl_size++] = web2;
1160 web2->in_load = 1;
1162 bitmap_set_bit (ri.scratch, j);
1163 ri.num_reloads--;
1166 bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
1167 BITMAP_AND_COMPL);
1168 last_bb = BLOCK_FOR_INSN (insn);
1169 last_block_insn = insn;
1170 if (!INSN_P (last_block_insn))
1171 last_block_insn = prev_real_insn (last_block_insn);
1174 ri.need_load = 0;
1175 if (INSN_P (insn))
1176 info = insn_df[INSN_UID (insn)];
1178 if (INSN_P (insn))
1179 for (n = 0; n < info.num_defs; n++)
1181 struct ref *ref = info.defs[n];
1182 struct web *web = def2web[DF_REF_ID (ref)];
1183 struct web *supweb = find_web_for_subweb (web);
1184 int is_non_def = 0;
1185 unsigned int n2;
1187 supweb = find_web_for_subweb (web);
1188 /* Webs which are defined here, but also used in the same insn
1189 are rmw webs, or this use isn't a death because of looping
1190 constructs. In neither case does this def make its
1191 resources available. Reloads for it are still needed, it's still
1192 live and its colors don't become free. */
1193 for (n2 = 0; n2 < info.num_uses; n2++)
1195 struct web *web2 = use2web[DF_REF_ID (info.uses[n2])];
1196 if (supweb == find_web_for_subweb (web2))
1198 is_non_def = 1;
1199 break;
1202 if (is_non_def)
1203 continue;
1205 if (!is_partly_live (ri.live, supweb))
1206 bitmap_set_bit (useless_defs, DF_REF_ID (ref));
1208 RESET_BIT (ri.live, web->id);
1209 if (bitmap_bit_p (ri.need_reload, web->id))
1211 ri.num_reloads--;
1212 bitmap_clear_bit (ri.need_reload, web->id);
1214 if (web != supweb)
1216 /* XXX subwebs aren't precisely tracked here. We have
1217 everything we need (inverse webs), but the code isn't
1218 yet written. We need to make all completely
1219 overlapping web parts non-live here. */
1220 /* If by luck now the whole web isn't live anymore, no
1221 reloads for it are needed. */
1222 if (!is_partly_live (ri.live, supweb)
1223 && bitmap_bit_p (ri.need_reload, supweb->id))
1225 ri.num_reloads--;
1226 bitmap_clear_bit (ri.need_reload, supweb->id);
1229 else
1231 struct web *sweb;
1232 /* If the whole web is defined here, no parts of it are
1233 live anymore and no reloads are needed for them. */
1234 for (sweb = supweb->subreg_next; sweb;
1235 sweb = sweb->subreg_next)
1237 RESET_BIT (ri.live, sweb->id);
1238 if (bitmap_bit_p (ri.need_reload, sweb->id))
1240 ri.num_reloads--;
1241 bitmap_clear_bit (ri.need_reload, sweb->id);
1245 if (alias (supweb)->type != SPILLED)
1246 update_spill_colors (&(ri.colors_in_use), web, 0);
1249 nl_first_reload = ri.nl_size;
1251 /* CALL_INSNs are not really deaths, but still more registers
1252 are free after a call than before.
1253 XXX Note that sometimes reload barfs when we emit insns between
1254 a call and the insn which copies the return register into a
1255 pseudo. */
1256 if (CALL_P (insn))
1257 ri.need_load = 1;
1258 else if (INSN_P (insn))
1259 for (n = 0; n < info.num_uses; n++)
1261 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1262 struct web *supweb = find_web_for_subweb (web);
1263 int is_death;
1264 if (supweb->type == PRECOLORED
1265 && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
1266 continue;
1267 is_death = !TEST_BIT (ri.live, supweb->id);
1268 is_death &= !TEST_BIT (ri.live, web->id);
1269 if (is_death)
1271 ri.need_load = 1;
1272 bitmap_set_bit (new_deaths, INSN_UID (insn));
1273 break;
1277 if (INSN_P (insn) && ri.num_reloads)
1279 int old_num_reloads = ri.num_reloads;
1280 reloads_to_loads (&ri, info.uses, info.num_uses, use2web);
1282 /* If this insn sets a pseudo, which isn't used later
1283 (i.e. wasn't live before) it is a dead store. We need
1284 to emit all reloads which have the same color as this def.
1285 We don't need to check for non-liveness here to detect
1286 the deadness (it is too late anyway, as we already cleared
1287 the liveness in the first loop over the defs), because if it
1288 _were_ live here, no reload could have that color, as
1289 it would already have been converted to a load. */
1290 if (ri.num_reloads)
1291 reloads_to_loads (&ri, info.defs, info.num_defs, def2web);
1292 if (ri.num_reloads != old_num_reloads && !ri.need_load)
1293 ri.need_load = 1;
1296 if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled))
1297 emit_loads (&ri, nl_first_reload, last_block_insn);
1299 if (INSN_P (insn) && flag_ra_ir_spilling)
1300 for (n = 0; n < info.num_uses; n++)
1302 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1303 struct web *aweb = alias (find_web_for_subweb (web));
1304 if (aweb->type != SPILLED)
1305 update_spill_colors (&(ri.colors_in_use), web, 1);
1308 ri.any_spilltemps_spilled = 0;
1309 if (INSN_P (insn))
1310 for (n = 0; n < info.num_uses; n++)
1312 struct web *web = use2web[DF_REF_ID (info.uses[n])];
1313 struct web *supweb = find_web_for_subweb (web);
1314 struct web *aweb = alias (supweb);
1315 SET_BIT (ri.live, web->id);
1316 if (aweb->type != SPILLED)
1317 continue;
1318 if (supweb->spill_temp)
1319 ri.any_spilltemps_spilled = 1;
1320 web->last_use_insn = insn;
1321 if (!web->in_load)
1323 if (spill_is_free (&(ri.colors_in_use), aweb) <= 0
1324 || !flag_ra_ir_spilling)
1326 ri.needed_loads[ri.nl_size++] = web;
1327 web->in_load = 1;
1328 web->one_load = 1;
1330 else if (!bitmap_bit_p (ri.need_reload, web->id))
1332 bitmap_set_bit (ri.need_reload, web->id);
1333 ri.num_reloads++;
1334 web->one_load = 1;
1336 else
1337 web->one_load = 0;
1339 else
1340 web->one_load = 0;
1343 if (LABEL_P (insn))
1344 break;
1347 nl_first_reload = ri.nl_size;
1348 if (ri.num_reloads)
1350 int in_ir = 0;
1351 edge e;
1352 int num = 0;
1353 edge_iterator ei;
1354 bitmap_iterator bi;
1356 HARD_REG_SET cum_colors, colors;
1357 CLEAR_HARD_REG_SET (cum_colors);
1358 FOR_EACH_EDGE (e, ei, bb->preds)
1360 int j;
1362 if (num >= 5)
1363 break;
1364 CLEAR_HARD_REG_SET (colors);
1365 EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j, bi)
1367 struct web *web = use2web[j];
1368 struct web *aweb = alias (find_web_for_subweb (web));
1369 if (aweb->type != SPILLED)
1370 update_spill_colors (&colors, web, 1);
1372 IOR_HARD_REG_SET (cum_colors, colors);
1373 num++;
1375 if (num == 5)
1376 in_ir = 1;
1378 bitmap_clear (ri.scratch);
1379 EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j, bi)
1381 struct web *web2 = ID2WEB (j);
1382 struct web *supweb2 = find_web_for_subweb (web2);
1383 struct web *aweb2 = alias (supweb2);
1384 /* Is the block entry an IR boundary for aweb2?
1385 Currently these are just some attempts at good conditions. */
1386 if (((ra_pass > 0 || supweb2->target_of_spilled_move)
1387 && (1 || in_ir || spill_is_free (&cum_colors, aweb2) <= 0))
1388 || (ra_pass == 1
1389 && (in_ir
1390 || spill_is_free (&cum_colors, aweb2) <= 0)))
1392 if (!web2->in_load)
1394 ri.needed_loads[ri.nl_size++] = web2;
1395 web2->in_load = 1;
1397 bitmap_set_bit (ri.scratch, j);
1398 ri.num_reloads--;
1401 bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
1402 BITMAP_AND_COMPL);
1405 ri.need_load = 1;
1406 emit_loads (&ri, nl_first_reload, last_block_insn);
1407 gcc_assert (ri.nl_size == 0);
1408 if (!insn)
1409 break;
1411 free (ri.needed_loads);
1412 sbitmap_free (ri.live);
1413 BITMAP_XFREE (ri.scratch);
1414 BITMAP_XFREE (ri.need_reload);
1417 /* WEB is a web conflicting with a spilled one. Prepare it
1418 to be able to rescan it in the next pass. Mark all its uses
1419 for checking, and clear some members of their web parts
1420 (of defs and uses). Notably don't clear the uplink. We don't
1421 change the layout of this web, just its conflicts.
1422 Also remember all IDs of its uses in USES_AS_BITMAP. */
1424 static void
1425 mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap)
1427 unsigned int i;
1428 for (i = 0; i < web->num_uses; i++)
1430 unsigned int id = DF_REF_ID (web->uses[i]);
1431 SET_BIT (last_check_uses, id);
1432 bitmap_set_bit (uses_as_bitmap, id);
1433 web_parts[df->def_id + id].spanned_deaths = 0;
1434 web_parts[df->def_id + id].crosses_call = 0;
1436 for (i = 0; i < web->num_defs; i++)
1438 unsigned int id = DF_REF_ID (web->defs[i]);
1439 web_parts[id].spanned_deaths = 0;
1440 web_parts[id].crosses_call = 0;
1444 /* The last step of the spill phase is to set up the structures for
1445 incrementally rebuilding the interference graph. We break up
1446 the web part structure of all spilled webs, mark their uses for
1447 rechecking, look at their neighbors, and clean up some global
1448 information, which we will rebuild. */
1450 static void
1451 detect_web_parts_to_rebuild (void)
1453 bitmap uses_as_bitmap;
1454 unsigned int i, pass;
1455 struct dlist *d;
1456 sbitmap already_webs = sbitmap_alloc (num_webs);
1458 uses_as_bitmap = BITMAP_XMALLOC ();
1459 if (last_check_uses)
1460 sbitmap_free (last_check_uses);
1461 last_check_uses = sbitmap_alloc (df->use_id);
1462 sbitmap_zero (last_check_uses);
1463 sbitmap_zero (already_webs);
1464 /* We need to recheck all uses of all webs involved in spilling (and the
1465 uses added by spill insns, but those are not analyzed yet).
1466 Those are the spilled webs themselves, webs coalesced to spilled ones,
1467 and webs conflicting with any of them. */
1468 for (pass = 0; pass < 2; pass++)
1469 for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
1471 struct web *web = DLIST_WEB (d);
1472 struct conflict_link *wl;
1473 unsigned int j;
1474 bitmap_iterator bi;
1476 /* This check is only needed for coalesced nodes, but hey. */
1477 if (alias (web)->type != SPILLED)
1478 continue;
1480 /* For the spilled web itself we also need to clear its
1481 uplink, to be able to rebuild smaller webs. After all,
1482 spilling has split the web. */
1483 for (i = 0; i < web->num_uses; i++)
1485 unsigned int id = DF_REF_ID (web->uses[i]);
1486 SET_BIT (last_check_uses, id);
1487 bitmap_set_bit (uses_as_bitmap, id);
1488 web_parts[df->def_id + id].uplink = NULL;
1489 web_parts[df->def_id + id].spanned_deaths = 0;
1490 web_parts[df->def_id + id].crosses_call = 0;
1492 for (i = 0; i < web->num_defs; i++)
1494 unsigned int id = DF_REF_ID (web->defs[i]);
1495 web_parts[id].uplink = NULL;
1496 web_parts[id].spanned_deaths = 0;
1497 web_parts[id].crosses_call = 0;
1500 /* Now look at all neighbors of this spilled web. */
1501 if (web->have_orig_conflicts)
1502 wl = web->orig_conflict_list;
1503 else
1504 wl = web->conflict_list;
1505 for (; wl; wl = wl->next)
1507 if (TEST_BIT (already_webs, wl->t->id))
1508 continue;
1509 SET_BIT (already_webs, wl->t->id);
1510 mark_refs_for_checking (wl->t, uses_as_bitmap);
1512 EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j, bi)
1514 struct web *web2 = ID2WEB (j);
1515 if (TEST_BIT (already_webs, web2->id))
1516 continue;
1517 SET_BIT (already_webs, web2->id);
1518 mark_refs_for_checking (web2, uses_as_bitmap);
1522 /* We also recheck unconditionally all uses of any hardregs. This means
1523 we _can_ delete all these uses from the live_at_end[] bitmaps.
1524 And because we sometimes delete insns referring to hardregs (when
1525 they became useless because they set up a rematerializable pseudo, which
1526 then was rematerialized), some of those uses will go away with the next
1527 df_analyze(). This means we even _must_ delete those uses from
1528 the live_at_end[] bitmaps. For simplicity we simply delete
1529 all of them. */
1530 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1531 if (!fixed_regs[i])
1533 struct df_link *link;
1534 for (link = df->regs[i].uses; link; link = link->next)
1535 if (link->ref)
1536 bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
1539 /* The information in live_at_end[] will be rebuilt for all uses
1540 we recheck, so clear it here (the uses of spilled webs might
1541 indeed not become members of it again). */
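/* live_at_end is apparently biased by two entries (room for the entry and
   exit blocks below index 0), so un-bias it while clearing all
   last_basic_block + 2 bitmaps by raw index, and restore the bias after.  */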
1542 live_at_end -= 2;
1543 for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
1544 bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap,
1545 BITMAP_AND_COMPL);
1546 live_at_end += 2;
1548 if (dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
1550 ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
1551 dump_sbitmap_file (dump_file, last_check_uses);
1553 sbitmap_free (already_webs);
1554 BITMAP_XFREE (uses_as_bitmap);
1557 /* Statistics about deleted insns, which are useless now. */
1558 static unsigned int deleted_def_insns;
1559 static unsigned HOST_WIDE_INT deleted_def_cost;
1561 /* In rewrite_program2() we noticed when a certain insn set a pseudo
1562 which wasn't live. Try to delete all those insns. */
1564 static void
1565 delete_useless_defs (void)
1567 unsigned int i;
1568 bitmap_iterator bi;
1570 /* If the insn only sets the def without any side effect (besides
1571 clobbers or uses), we can delete it. single_set() also tests
1572 for INSN_P(insn). */
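/* Additionally require that the web stayed spilled and never received a
   stack slot, i.e. no spill load or store was emitted on its behalf, so
   the stored value really is unused.  */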
1573 EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i, bi)
1575 rtx insn = DF_REF_INSN (df->defs[i]);
1576 rtx set = single_set (insn);
1577 struct web *web = find_web_for_subweb (def2web[i]);
1578 if (set && web->type == SPILLED && web->stack_slot == NULL)
1580 deleted_def_insns++;
1581 deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
1582 PUT_CODE (insn, NOTE);
1583 NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
1584 df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
1589 /* Look for spilled webs on whose behalf no insns were emitted.
1590 We invert the changed flag of the webs, so after this function
1591 a nonzero changed flag means that this web was not spillable (at least
1592 in this pass). */
1594 static void
1595 detect_non_changed_webs (void)
1597 struct dlist *d, *d_next;
1598 for (d = WEBS(SPILLED); d; d = d_next)
1600 struct web *web = DLIST_WEB (d);
1601 d_next = d->next;
1602 if (!web->changed)
1604 ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
1605 web->id);
1606 remove_web_from_list (web);
1607 put_web (web, COLORED);
1608 web->changed = 1;
1610 else
1611 web->changed = 0;
1612 /* From now on web->changed is used as the opposite flag.
1613 I.e. colored webs, which have changed set were formerly
1614 spilled webs for which no insns were emitted. */
1618 /* Before spilling we clear the changed flags for all spilled webs. */
1620 static void
1621 reset_changed_flag (void)
1623 struct dlist *d;
1624 for (d = WEBS(SPILLED); d; d = d->next)
1625 DLIST_WEB(d)->changed = 0;
1628 /* The toplevel function for this file. Given a colorized graph,
1629 and lists of spilled, coalesced and colored webs, we add some
1630 spill code. This also sets up the structures for incrementally
1631 building the interference graph in the next pass. */
1633 void
1634 actual_spill (void)
1636 int i;
1637 bitmap_iterator bi;
1638 bitmap new_deaths = BITMAP_XMALLOC ();
1640 reset_changed_flag ();
1641 spill_coalprop ();
1642 choose_spill_colors ();
1643 useless_defs = BITMAP_XMALLOC ();
1644 if (flag_ra_improved_spilling)
1645 rewrite_program2 (new_deaths);
1646 else
1647 rewrite_program (new_deaths);
1648 insert_stores (new_deaths);
1649 delete_useless_defs ();
1650 BITMAP_XFREE (useless_defs);
1651 sbitmap_free (insns_with_deaths);
1652 insns_with_deaths = sbitmap_alloc (get_max_uid ());
1653 death_insns_max_uid = get_max_uid ();
1654 sbitmap_zero (insns_with_deaths);
1655 EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i, bi)
1657 SET_BIT (insns_with_deaths, i);
1659 detect_non_changed_webs ();
1660 detect_web_parts_to_rebuild ();
1661 BITMAP_XFREE (new_deaths);
1664 /* A bitmap of pseudo reg numbers which are coalesced directly
1665 to a hardreg. Set in emit_colors(), used and freed in
1666 remove_suspicious_death_notes(). */
1667 static bitmap regnos_coalesced_to_hardregs;
1669 /* Create new pseudos for each web we colored, change insns to
1670 use those pseudos and set up ra_reg_renumber. */
1672 void
1673 emit_colors (struct df *df)
1675 unsigned int i;
1676 int si;
1677 struct web *web;
1678 int old_max_regno = max_reg_num ();
1679 regset old_regs;
1680 basic_block bb;
1682 /* This bitmap is freed in remove_suspicious_death_notes(),
1683 which is also the user of it. */
1684 regnos_coalesced_to_hardregs = BITMAP_XMALLOC ();
1685 /* First create the (REG xx) rtx's for all webs, as we need to know
1686 the number, to make sure flow has enough memory for them in the
1687 various tables. */
1688 for (i = 0; i < num_webs - num_subwebs; i++)
1690 web = ID2WEB (i);
1691 if (web->type != COLORED && web->type != COALESCED)
1692 continue;
1693 if (web->type == COALESCED && alias (web)->type == COLORED)
1694 continue;
1695 gcc_assert (!web->reg_rtx);
1696 gcc_assert (web->regno >= FIRST_PSEUDO_REGISTER);
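/* Pseudos numbered max_normal_pseudo and above are the allocator's own
   stack-slot pseudos (created by allocate_spill_web above): give them a
   real stack slot if they got the unusable color, otherwise a fresh
   pseudo register.  */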
1698 if (web->regno >= max_normal_pseudo)
1700 rtx place;
1701 if (web->color == an_unusable_color)
1703 unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno);
1704 unsigned int total_size = MAX (inherent_size, 0);
1705 place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
1706 total_size,
1707 inherent_size == total_size ? 0 : -1);
1708 set_mem_alias_set (place, new_alias_set ());
1710 else
1712 place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
1714 web->reg_rtx = place;
1716 else
1718 /* Special case for the i386 'fix_truncdi_nomemory' insn.
1719 We must choose the mode from the insn, not from PSEUDO_REGNO_MODE.
1720 Actually this matters only for clobbered registers. */
1721 if (web->num_uses == 0 && web->num_defs == 1)
1722 web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0])));
1723 else
1724 web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
1725 /* Remember the different parts directly coalesced to a hardreg. */
1726 if (web->type == COALESCED)
1727 bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx));
1730 ra_max_regno = max_regno = max_reg_num ();
1731 allocate_reg_info (max_regno, FALSE, FALSE);
1732 ra_reg_renumber = xmalloc (max_regno * sizeof (short));
1733 for (si = 0; si < max_regno; si++)
1734 ra_reg_renumber[si] = -1;
1736 /* Then go through all references, and replace them by a new
1737 pseudoreg for each web. All uses. */
1738 /* XXX
1739 Beware: The order of replacements (first uses, then defs) matters only
1740 for read-mod-write insns, where the RTL expression for the REG is
1741 shared between def and use. For normal rmw insns we connected all such
1742 webs, i.e. both the use and the def (which are the same memory)
1743 there get the same new pseudo-reg, so the order would not matter.
1744 _However_ we did not connect webs where the read cycle was an
1745 uninitialized read. If we now first replaced the def reference
1746 and then the use ref, we would initialize it with a REG rtx which
1747 never gets initialized, and, even worse, which would overwrite
1748 the definition of the other REG rtx. So we must replace the defs last. */
1750 for (i = 0; i < df->use_id; i++)
1751 if (df->uses[i])
1753 regset rs = DF_REF_BB (df->uses[i])->global_live_at_start;
1754 rtx regrtx;
1755 web = use2web[i];
1756 web = find_web_for_subweb (web);
1757 if (web->type != COLORED && web->type != COALESCED)
1758 continue;
1759 regrtx = alias (web)->reg_rtx;
1760 if (!regrtx)
1761 regrtx = web->reg_rtx;
1762 *DF_REF_REAL_LOC (df->uses[i]) = regrtx;
1763 if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
1765 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1766 SET_REGNO_REG_SET (rs, REGNO (regrtx));
1770 /* And all defs. */
1771 for (i = 0; i < df->def_id; i++)
1773 regset rs;
1774 rtx regrtx;
1775 if (!df->defs[i])
1776 continue;
1777 rs = DF_REF_BB (df->defs[i])->global_live_at_start;
1778 web = def2web[i];
1779 web = find_web_for_subweb (web);
1780 if (web->type != COLORED && web->type != COALESCED)
1781 continue;
1782 regrtx = alias (web)->reg_rtx;
1783 if (!regrtx)
1784 regrtx = web->reg_rtx;
1785 *DF_REF_REAL_LOC (df->defs[i]) = regrtx;
1786 if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
1788 /* Don't simply clear the current regno, as it might be
1789 replaced by two webs. */
1790 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1791 SET_REGNO_REG_SET (rs, REGNO (regrtx));
1795 /* And now set up the ra_reg_renumber array for reload with all the new
1796 pseudo-regs. */
1797 for (i = 0; i < num_webs - num_subwebs; i++)
1799 web = ID2WEB (i);
1800 if (web->reg_rtx && REG_P (web->reg_rtx))
1802 int r = REGNO (web->reg_rtx);
1803 ra_reg_renumber[r] = web->color;
1804 ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n",
1805 r, web->id, ra_reg_renumber[r]);
1809 old_regs = BITMAP_XMALLOC ();
1810 for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++)
1811 SET_REGNO_REG_SET (old_regs, si);
1812 FOR_EACH_BB (bb)
1814 AND_COMPL_REG_SET (bb->global_live_at_start, old_regs);
1815 AND_COMPL_REG_SET (bb->global_live_at_end, old_regs);
1817 BITMAP_XFREE (old_regs);
1820 /* Delete some coalesced moves from the insn stream. */
1822 void
1823 delete_moves (void)
1825 struct move_list *ml;
1826 struct web *s, *t;
1827 /* XXX Beware: We normally would test here each copy insn, if
1828 source and target got the same color (either by coalescing or by pure
1829 luck), and then delete it.
1830 This will currently not work. One problem is that we don't color
1831 the regs ourselves, but instead defer to reload. So the colorization
1832 is only a kind of suggestion, which reload doesn't have to follow.
1833 For webs which are coalesced to a normal colored web, we only have one
1834 new pseudo, so in this case we indeed can delete copy insns involving
1835 those (because even if reload colors them differently from our suggestion,
1836 it still has to color them the same, as only one pseudo exists). But for
1837 webs coalesced to precolored ones, we have not a single pseudo, but
1838 instead one for each coalesced web. This means that we can't delete
1839 copy insns where source and target are webs coalesced to precolored
1840 ones, because then the connection between both webs is destroyed. Note
1841 that this not only means copy insns, where one side is the precolored one
1842 itself, but also those between webs which are coalesced to one color.
1843 Also because of reload we can't delete copy insns which involve any
1844 precolored web at all. These often have also special meaning (e.g.
1845 copying a return value of a call to a pseudo, or copying pseudo to the
1846 return register), and the deletion would confuse reload in thinking the
1847 pseudo isn't needed. One of these days reload will go away and we can
1848 do everything we want.
1849 In effect, because of the later reload, we can't base our deletion on the
1850 colors themselves, but instead need to base it on the newly created
1851 pseudos. */
1852 for (ml = wl_moves; ml; ml = ml->next)
1853 /* The real condition we would ideally use is: s->color == t->color.
1854 Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
1855 we want to prevent deletion of "special" copies. */
1856 if (ml->move
1857 && (s = alias (ml->move->source_web))->reg_rtx
1858 == (t = alias (ml->move->target_web))->reg_rtx
1859 && s->type != PRECOLORED && t->type != PRECOLORED)
1861 basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
1862 df_insn_delete (df, bb, ml->move->insn);
1863 deleted_move_insns++;
1864 deleted_move_cost += bb->frequency + 1;
1868 /* Due to reasons documented elsewhere we create different pseudos
1869 for all webs coalesced to hardregs. For these parts life_analysis()
1870 might have added REG_DEAD notes without considering that only this part
1871 but not the whole coalesced web dies. The RTL is correct, there is no
1872 coalescing yet. But if later reload's alter_reg() substitutes the
1873 hardreg into the REG rtx it looks like that particular hardreg dies here,
1874 although (due to coalescing) it still is live. This might make different
1875 places of reload think it can use that hardreg for reload regs,
1876 accidentally overwriting it. So we need to remove those REG_DEAD notes.
1877 (Or better teach life_analysis() and reload about our coalescing, but
1878 that comes later) Bah. */
1880 void
1881 remove_suspicious_death_notes (void)
1883 rtx insn;
1884 for (insn = get_insns(); insn; insn = NEXT_INSN (insn))
1885 if (INSN_P (insn))
1887 rtx *pnote = &REG_NOTES (insn);
1888 while (*pnote)
1890 rtx note = *pnote;
1891 if ((REG_NOTE_KIND (note) == REG_DEAD
1892 || REG_NOTE_KIND (note) == REG_UNUSED)
1893 && (REG_P (XEXP (note, 0))
1894 && bitmap_bit_p (regnos_coalesced_to_hardregs,
1895 REGNO (XEXP (note, 0)))))
1896 *pnote = XEXP (note, 1);
1897 else
1898 pnote = &XEXP (*pnote, 1);
1901 BITMAP_XFREE (regnos_coalesced_to_hardregs);
1902 regnos_coalesced_to_hardregs = NULL;
1905 /* Allocate space for max_reg_num() pseudo registers, and
1906 fill reg_renumber[] from ra_reg_renumber[]. If FREE_IT
1907 is nonzero, also free ra_reg_renumber and reset ra_max_regno. */
1909 void
1910 setup_renumber (int free_it)
1912 int i;
1913 max_regno = max_reg_num ();
1914 allocate_reg_info (max_regno, FALSE, TRUE);
1915 for (i = 0; i < max_regno; i++)
1917 reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
1919 if (free_it)
1921 free (ra_reg_renumber);
1922 ra_reg_renumber = NULL;
1923 ra_max_regno = 0;
1927 /* Dump the costs and savings due to spilling, i.e. of added spill insns
1928 and removed moves or useless defs. */
1930 void
1931 dump_cost (unsigned int level)
1933 ra_debug_msg (level, "Instructions for spilling\n added:\n");
1934 ra_debug_msg (level, " loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1935 emitted_spill_loads, spill_load_cost);
1936 ra_debug_msg (level, " stores=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1937 emitted_spill_stores, spill_store_cost);
1938 ra_debug_msg (level, " remat =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1939 emitted_remat, spill_remat_cost);
1940 ra_debug_msg (level, " removed:\n moves =%d cost="
1941 HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1942 deleted_move_insns, deleted_move_cost);
1943 ra_debug_msg (level, " others=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
1944 deleted_def_insns, deleted_def_cost);
1947 /* Initialization of the rewrite phase. */
1949 void
1950 ra_rewrite_init (void)
1952 emitted_spill_loads = 0;
1953 emitted_spill_stores = 0;
1954 emitted_remat = 0;
1955 spill_load_cost = 0;
1956 spill_store_cost = 0;
1957 spill_remat_cost = 0;
1958 deleted_move_insns = 0;
1959 deleted_move_cost = 0;
1960 deleted_def_insns = 0;
1961 deleted_def_cost = 0;
1965 /* vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: */