/* Graph coloring register allocator
   Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Michael Matz <matz@suse.de>
   and Daniel Berlin <dan@cgsoftware.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under the
   terms of the GNU General Public License as published by the Free Software
   Foundation; either version 2, or (at your option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
   details.

   You should have received a copy of the GNU General Public License along
   with GCC; see the file COPYING.  If not, write to the Free Software
   Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
/* This file is part of the graph coloring register allocator, and
   contains the functions to change the insn stream.  I.e. it adds
   spill code, rewrites insns to use the new registers after
   coloring and deletes coalesced moves.  */
static void spill_coalescing (sbitmap, sbitmap);
static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap);
static void spill_prop_insert (struct web *, sbitmap, sbitmap);
static int spill_propagation (sbitmap, sbitmap, sbitmap);
static void spill_coalprop (void);
static void allocate_spill_web (struct web *);
static void choose_spill_colors (void);
static void rewrite_program (bitmap);
static void remember_slot (struct rtx_list **, rtx);
static int slots_overlap_p (rtx, rtx);
static void delete_overlapping_slots (struct rtx_list **, rtx);
static int slot_member_p (struct rtx_list *, rtx);
static void insert_stores (bitmap);
static int spill_same_color_p (struct web *, struct web *);
static bool is_partly_live_1 (sbitmap, struct web *);
static void update_spill_colors (HARD_REG_SET *, struct web *, int);
static int spill_is_free (HARD_REG_SET *, struct web *);
static void emit_loads (struct rewrite_info *, int, rtx);
static void reloads_to_loads (struct rewrite_info *, struct ref **,
                              unsigned int, struct web **);
static void rewrite_program2 (bitmap);
static void mark_refs_for_checking (struct web *, bitmap);
static void detect_web_parts_to_rebuild (void);
static void delete_useless_defs (void);
static void detect_non_changed_webs (void);
static void reset_changed_flag (void);
/* For tracking some statistics, we count the number (and cost)
   of deleted move insns.  */
static unsigned int deleted_move_insns;
static unsigned HOST_WIDE_INT deleted_move_cost;
/* This is the spill coalescing phase.  SPILLED contains the IDs of all
   already spilled webs; COALESCE contains the IDs of webs still to be
   checked for coalescing.  This tries to coalesce two webs which were
   spilled, are connected by a move, and don't conflict.  Doing so
   greatly reduces memory shuffling.  */
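/* Illustration (a sketch, not from the original sources): if web A was
   spilled, and web B, also spilled, is connected to A by the move "A <- B",
   and A and B don't conflict, then both can live in the same stack slot.
   The move then copies a memory location onto itself and can simply be
   deleted, saving one load and one store per execution.  */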
static void
spill_coalescing (sbitmap coalesce, sbitmap spilled)
{
  struct move_list *ml;
  struct move *m;
  for (ml = wl_moves; ml; ml = ml->next)
    if ((m = ml->move) != NULL)
      {
        struct web *s = alias (m->source_web);
        struct web *t = alias (m->target_web);
        if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id))
            || (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id)))
          {
            struct conflict_link *wl;
            if (TEST_BIT (sup_igraph, s->id * num_webs + t->id)
                || TEST_BIT (sup_igraph, t->id * num_webs + s->id)
                || s->pattern || t->pattern)
              continue;

            deleted_move_insns++;
            deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1;
            PUT_CODE (m->insn, NOTE);
            NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED;
            df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn);

            m->target_web->target_of_spilled_move = 1;
            if (s == t)
              /* Maybe already coalesced due to a former move.  */
              continue;

            /* Merge the nodes S and T in the I-graph.  Beware: the merging
               of conflicts relies on the fact that in the conflict list
               of T all of its conflicts are noted.  This is currently not
               the case if T were the target of a coalesced web, because
               then (in combine () above) only those conflicts were noted in
               T from the web which was coalesced into T which at the time
               of combine() were not already on the SELECT stack or were
               themselves coalesced to something else.  */
            if (t->type != SPILLED || s->type != SPILLED)
              abort ();
            remove_list (t->dlink, &WEBS(SPILLED));
            put_web (t, COALESCED);

            for (wl = t->conflict_list; wl; wl = wl->next)
              {
                struct web *pweb = wl->t;
                if (wl->sub == NULL)
                  record_conflict (s, pweb);
                else
                  {
                    struct sub_conflict *sl;
                    for (sl = wl->sub; sl; sl = sl->next)
                      {
                        struct web *sweb = NULL;
                        if (SUBWEB_P (sl->s))
                          sweb = find_subweb (s, sl->s->orig_x);
                        if (!sweb)
                          sweb = s;
                        record_conflict (sweb, sl->t);
                      }
                  }
                /* No decrement_degree here, because we already have colored
                   the graph, and don't want to insert pweb into any other
                   list.  */
                pweb->num_conflicts -= 1 + t->add_hardregs;
              }
          }
      }
}
/* Returns the probable saving of coalescing WEB with webs from
   SPILLED, in terms of removed move insn cost.  */

static unsigned HOST_WIDE_INT
spill_prop_savings (struct web *web, sbitmap spilled)
{
  unsigned HOST_WIDE_INT savings = 0;
  struct move_list *ml;
  struct move *m;
  unsigned HOST_WIDE_INT cost;
  if (web->pattern)
    return 0;
  cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1);
  cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0);
  for (ml = wl_moves; ml; ml = ml->next)
    if ((m = ml->move) != NULL)
      {
        struct web *s = alias (m->source_web);
        struct web *t = alias (m->target_web);
        if (s != web)
          {
            struct web *h = s;
            s = t;
            t = h;
          }
        if (s != web || !TEST_BIT (spilled, t->id) || t->pattern
            || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
            || TEST_BIT (sup_igraph, t->id * num_webs + s->id))
          continue;
        savings += BLOCK_FOR_INSN (m->insn)->frequency * cost;
      }
  return savings;
}
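/* Worked example (illustrative numbers only, not from the original
   sources): for a web whose mode and register class give a
   MEMORY_MOVE_COST of 2 in both directions, COST is (1 + 2) + (1 + 2) = 6,
   i.e. one load plus one store.  A qualifying move sitting in a block with
   frequency 3 then contributes 3 * 6 = 18 to the savings.  */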
/* This adds all IDs of colored webs, which are connected to WEB by a
   move, to LIST and PROCESSED.  */

static void
spill_prop_insert (struct web *web, sbitmap list, sbitmap processed)
{
  struct move_list *ml;
  struct move *m;
  for (ml = wl_moves; ml; ml = ml->next)
    if ((m = ml->move) != NULL)
      {
        struct web *s = alias (m->source_web);
        struct web *t = alias (m->target_web);
        if (s != web)
          {
            struct web *h = s;
            s = t;
            t = h;
          }
        if (s != web || t->type != COLORED || TEST_BIT (processed, t->id))
          continue;
        SET_BIT (list, t->id);
        SET_BIT (processed, t->id);
      }
}
/* The spill propagation pass.  If we have two spilled webs, the first
   connected through a move to a colored one, and the second also connected
   to that colored one, and this colored web is only used to connect both
   spilled webs, it might be worthwhile to spill that colored one.
   This is the case if the cost of the removed copy insns (all three webs
   could be placed into the same stack slot) is higher than the spill cost
   of the web.
   TO_PROP are the webs we try to propagate from (i.e. spilled ones),
   SPILLED the set of all spilled webs so far and PROCESSED the set
   of all webs processed so far, so we don't do work twice.  */
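/* Illustration (a sketch, not from the original sources): webs A and B are
   spilled, web C is colored, and the only moves involving C are "A <- C"
   and "C <- B".  If spilling C lets A, B and C share one stack slot, all
   the copies connecting them disappear; when the cost of those removed
   copies exceeds C's spill cost, propagating the spill to C is a win.  */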
static int
spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed)
{
  int id;
  int again = 0;
  sbitmap list = sbitmap_alloc (num_webs);
  sbitmap_zero (list);

  /* First insert colored move neighbors into the candidate list.  */
  EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id,
    {
      spill_prop_insert (ID2WEB (id), list, processed);
    });
  sbitmap_zero (to_prop);

  /* For all candidates, see if the savings are higher than its
     spill cost.  */
  while ((id = sbitmap_first_set_bit (list)) >= 0)
    {
      struct web *web = ID2WEB (id);
      RESET_BIT (list, id);
      if (spill_prop_savings (web, spilled) >= web->spill_cost)
        {
          /* If so, we found a new spilled web.  Insert its colored
             move neighbors again, and mark that we need to repeat the
             whole mainloop of spill propagation/coalescing again.  */
          remove_web_from_list (web);
          web->color = -1;
          put_web (web, SPILLED);
          SET_BIT (spilled, id);
          SET_BIT (to_prop, id);
          spill_prop_insert (web, list, processed);
          again = 1;
        }
    }
  sbitmap_free (list);
  return again;
}
/* The main phase to improve spill costs.  This repeatedly runs
   spill coalescing and spill propagation, until nothing changes.  */

static void
spill_coalprop (void)
{
  sbitmap spilled, processed, to_prop;
  struct dlist *d;
  int again;
  spilled = sbitmap_alloc (num_webs);
  processed = sbitmap_alloc (num_webs);
  to_prop = sbitmap_alloc (num_webs);
  sbitmap_zero (spilled);
  for (d = WEBS(SPILLED); d; d = d->next)
    SET_BIT (spilled, DLIST_WEB (d)->id);
  sbitmap_copy (to_prop, spilled);
  sbitmap_zero (processed);
  do
    {
      spill_coalescing (to_prop, spilled);
      /* XXX Currently (with optimistic coalescing) spill_propagation()
         doesn't give better code, sometimes it gives worse (but not by much)
         code.  I believe this is because of slightly wrong cost
         measurements.  Anyway right now it isn't worth the time it takes,
         so deactivate it for now.  */
      again = 0 && spill_propagation (to_prop, spilled, processed);
    }
  while (again);
  sbitmap_free (to_prop);
  sbitmap_free (processed);
  sbitmap_free (spilled);
}
/* Allocate a spill slot for a WEB.  Currently we spill to pseudo
   registers, to be able to track also webs for "stack slots", and also
   to possibly colorize them.  These pseudos are sometimes handled
   in a special way, where we know that they also can represent
   stack slots.  */

static void
allocate_spill_web (struct web *web)
{
  int regno = web->regno;
  rtx slot;
  if (web->stack_slot)
    return;
  slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno));
  web->stack_slot = slot;
}
/* This chooses a color for all SPILLED webs for interference region
   spilling.  The heuristic isn't good in any way.  */

static void
choose_spill_colors (void)
{
  struct dlist *d;
  unsigned HOST_WIDE_INT *costs
    = xmalloc (FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
  for (d = WEBS(SPILLED); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      struct conflict_link *wl;
      int bestc, c;
      HARD_REG_SET avail;
      memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
      for (wl = web->conflict_list; wl; wl = wl->next)
        {
          struct web *pweb = wl->t;
          if (pweb->type == COLORED || pweb->type == PRECOLORED)
            costs[pweb->color] += pweb->spill_cost;
        }

      COPY_HARD_REG_SET (avail, web->usable_regs);
      if (web->crosses_call)
        {
          /* Add an arbitrary constant cost to colors not usable by
             call-crossing webs without saves/loads.  */
          for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
            if (TEST_HARD_REG_BIT (call_used_reg_set, c))
              costs[c] += 1000;
        }
      bestc = -1;
      for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
        if ((bestc < 0 || costs[bestc] > costs[c])
            && TEST_HARD_REG_BIT (avail, c)
            && HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
          {
            int i, size;
            size = hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
            for (i = 1; i < size
                 && TEST_HARD_REG_BIT (avail, c + i); i++);
            if (i == size)
              bestc = c;
          }
      web->color = bestc;
      ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n",
                    bestc, web->id);
    }

  free (costs);
}
/* For statistics sake we count the number and cost of all new loads,
   stores and emitted rematerializations.  */
static unsigned int emitted_spill_loads;
static unsigned int emitted_spill_stores;
static unsigned int emitted_remat;
static unsigned HOST_WIDE_INT spill_load_cost;
static unsigned HOST_WIDE_INT spill_store_cost;
static unsigned HOST_WIDE_INT spill_remat_cost;

/* In rewrite_program2() we detect if some def is useless, in the sense
   that the pseudo set is not live anymore at that point.  The REF_IDs
   of such defs are noted here.  */
static bitmap useless_defs;
/* This is the simple and fast version of rewriting the program to
   include spill code.  It spills at every insn containing spilled
   defs or uses.  Loads are added only if flag_ra_spill_every_use is
   nonzero, otherwise only stores will be added.  This doesn't
   support rematerialization.
   NEW_DEATHS is filled with uids for insns, which probably contain
   deaths.  */

static void
rewrite_program (bitmap new_deaths)
{
  unsigned int i;
  struct dlist *d;
  bitmap b = BITMAP_XMALLOC ();

  /* We walk over all webs, over all uses/defs.  For all webs, we need
     to look at spilled webs, and webs coalesced to spilled ones, in case
     their alias isn't broken up, or they got spill coalesced.  */
  for (i = 0; i < 2; i++)
    for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
      {
        struct web *web = DLIST_WEB (d);
        struct web *aweb = alias (web);
        unsigned int j;
        rtx slot;

        /* Is trivially true for spilled webs, but not for coalesced ones.  */
        if (aweb->type != SPILLED)
          continue;

        /* First add loads before every use, if we have to.  */
        if (flag_ra_spill_every_use)
          {
            bitmap_clear (b);
            allocate_spill_web (aweb);
            slot = aweb->stack_slot;
            for (j = 0; j < web->num_uses; j++)
              {
                rtx insns, target, source;
                rtx insn = DF_REF_INSN (web->uses[j]);
                rtx prev = PREV_INSN (insn);
                basic_block bb = BLOCK_FOR_INSN (insn);
                /* Happens when spill_coalescing() deletes move insns.  */
                if (!INSN_P (insn))
                  continue;

                /* Check that we didn't already add a load for this web
                   and insn.  Happens when an insn uses the same web
                   multiple times.  */
                if (bitmap_bit_p (b, INSN_UID (insn)))
                  continue;
                bitmap_set_bit (b, INSN_UID (insn));
                target = DF_REF_REG (web->uses[j]);
                source = slot;
                start_sequence ();
                if (GET_CODE (target) == SUBREG)
                  source = simplify_gen_subreg (GET_MODE (target), source,
                                                GET_MODE (source),
                                                SUBREG_BYTE (target));
                ra_emit_move_insn (target, source);
                insns = get_insns ();
                end_sequence ();
                emit_insn_before (insns, insn);

                if (BB_HEAD (bb) == insn)
                  BB_HEAD (bb) = NEXT_INSN (prev);
                for (insn = PREV_INSN (insn); insn != prev;
                     insn = PREV_INSN (insn))
                  {
                    set_block_for_insn (insn, bb);
                    df_insn_modify (df, bb, insn);
                  }

                emitted_spill_loads++;
                spill_load_cost += bb->frequency + 1;
              }
          }

        /* Now emit the stores after each def.
           If any uses were loaded from stackslots (compared to
           rematerialized or not reloaded due to IR spilling),
           aweb->stack_slot will be set.  If not, we don't need to emit
           any stack stores.  */
        slot = aweb->stack_slot;
        bitmap_clear (b);
        if (slot)
          for (j = 0; j < web->num_defs; j++)
            {
              rtx insns, source, dest;
              rtx insn = DF_REF_INSN (web->defs[j]);
              rtx following = NEXT_INSN (insn);
              basic_block bb = BLOCK_FOR_INSN (insn);
              /* Happens when spill_coalescing() deletes move insns.  */
              if (!INSN_P (insn))
                continue;
              if (bitmap_bit_p (b, INSN_UID (insn)))
                continue;
              bitmap_set_bit (b, INSN_UID (insn));
              start_sequence ();
              source = DF_REF_REG (web->defs[j]);
              dest = slot;
              if (GET_CODE (source) == SUBREG)
                dest = simplify_gen_subreg (GET_MODE (source), dest,
                                            GET_MODE (dest),
                                            SUBREG_BYTE (source));
              ra_emit_move_insn (dest, source);

              insns = get_insns ();
              end_sequence ();
              if (insns)
                {
                  emit_insn_after (insns, insn);
                  if (BB_END (bb) == insn)
                    BB_END (bb) = PREV_INSN (following);
                  for (insn = insns; insn != following;
                       insn = NEXT_INSN (insn))
                    {
                      set_block_for_insn (insn, bb);
                      df_insn_modify (df, bb, insn);
                    }
                }
              else
                df_insn_modify (df, bb, insn);
              emitted_spill_stores++;
              spill_store_cost += bb->frequency + 1;
              /* XXX we should set new_deaths for all inserted stores
                 whose pseudo dies here.
                 Note, that this isn't the case for _all_ stores.  */
              /* I.e. the next is wrong, and might cause some spilltemps
                 to be categorized as spilltemp2's (i.e. live over a death),
                 although they aren't.  This might make them spill again,
                 which causes endlessness in the case this insn is in fact
                 no new death.  */
              bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
            }
      }

  BITMAP_XFREE (b);
}
/* A simple list of rtx's.  */
struct rtx_list
{
  struct rtx_list *next;
  rtx x;
};
/* Adds X to *LIST.  */

static void
remember_slot (struct rtx_list **list, rtx x)
{
  struct rtx_list *l;
  /* PRE: X is not already in LIST.  */
  l = ra_alloc (sizeof (*l));
  l->x = x;
  l->next = *list;
  *list = l;
}
/* Given two rtx' S1 and S2, either being REGs or MEMs (or SUBREGs
   thereof), return nonzero, if they overlap.  REGs and MEMs don't
   overlap, and if they are MEMs they must have an easy address
   (plus (basereg) (const_int x)), otherwise they overlap.  */
static int
slots_overlap_p (rtx s1, rtx s2)
{
  rtx base1, base2;
  HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
  int size1 = GET_MODE_SIZE (GET_MODE (s1));
  int size2 = GET_MODE_SIZE (GET_MODE (s2));
  if (GET_CODE (s1) == SUBREG)
    ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1);
  if (GET_CODE (s2) == SUBREG)
    ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2);

  if (s1 == s2)
    return 1;

  if (GET_CODE (s1) != GET_CODE (s2))
    return 0;

  if (REG_P (s1) && REG_P (s2))
    {
      if (REGNO (s1) != REGNO (s2))
        return 0;
      if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
        return 0;
      return 1;
    }
  if (GET_CODE (s1) != MEM || GET_CODE (s2) != MEM)
    abort ();
  s1 = XEXP (s1, 0);
  s2 = XEXP (s2, 0);
  if (GET_CODE (s1) != PLUS || !REG_P (XEXP (s1, 0))
      || GET_CODE (XEXP (s1, 1)) != CONST_INT)
    return 1;
  if (GET_CODE (s2) != PLUS || !REG_P (XEXP (s2, 0))
      || GET_CODE (XEXP (s2, 1)) != CONST_INT)
    return 1;
  base1 = XEXP (s1, 0);
  base2 = XEXP (s2, 0);
  if (!rtx_equal_p (base1, base2))
    return 1;
  ofs1 += INTVAL (XEXP (s1, 1));
  ofs2 += INTVAL (XEXP (s2, 1));
  if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
    return 0;
  return 1;
}
/* This deletes from *LIST all rtx's which overlap with X in the sense
   of slots_overlap_p().  */

static void
delete_overlapping_slots (struct rtx_list **list, rtx x)
{
  while (*list)
    {
      if (slots_overlap_p ((*list)->x, x))
        *list = (*list)->next;
      else
        list = &((*list)->next);
    }
}
/* Returns nonzero, if X is a member of LIST.  */

static int
slot_member_p (struct rtx_list *list, rtx x)
{
  for (; list; list = list->next)
    if (rtx_equal_p (list->x, x))
      return 1;
  return 0;
}
/* A more sophisticated (and slower) method of adding the stores, than
   rewrite_program().  This goes backward through the insn stream, adding
   stores as it goes, but only if it hasn't just added a store to the
   same location.  NEW_DEATHS is a bitmap filled with uids of insns
   containing deaths.  */

static void
insert_stores (bitmap new_deaths)
{
  rtx insn;
  rtx last_slot = NULL_RTX;
  struct rtx_list *slots = NULL;

  /* We go simply backwards over basic block borders.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    {
      int uid = INSN_UID (insn);

      /* If we reach a basic block border, which has more than one
         outgoing edge, we simply forget all already emitted stores.  */
      if (GET_CODE (insn) == BARRIER
          || JUMP_P (insn) || can_throw_internal (insn))
        {
          last_slot = NULL_RTX;
          slots = NULL;
        }
      if (!INSN_P (insn))
        continue;

      /* If this insn was not just added in this pass.  */
      if (uid < insn_df_max_uid)
        {
          unsigned int n;
          rtx following = NEXT_INSN (insn);
          basic_block bb = BLOCK_FOR_INSN (insn);
          struct ra_insn_info info;

          info = insn_df[uid];
          for (n = 0; n < info.num_defs; n++)
            {
              struct web *web = def2web[DF_REF_ID (info.defs[n])];
              struct web *aweb = alias (find_web_for_subweb (web));
              rtx slot, source;
              if (aweb->type != SPILLED || !aweb->stack_slot)
                continue;
              slot = aweb->stack_slot;
              source = DF_REF_REG (info.defs[n]);
              /* adjust_address() might generate code.  */
              start_sequence ();
              if (GET_CODE (source) == SUBREG)
                slot = simplify_gen_subreg (GET_MODE (source), slot,
                                            GET_MODE (slot),
                                            SUBREG_BYTE (source));
              /* If we have no info about emitted stores, or it didn't
                 contain the location we intend to use soon, then
                 add the store.  */
              if ((!last_slot || !rtx_equal_p (slot, last_slot))
                  && ! slot_member_p (slots, slot))
                {
                  rtx insns, ni;
                  last_slot = slot;
                  remember_slot (&slots, slot);
                  ra_emit_move_insn (slot, source);
                  insns = get_insns ();
                  end_sequence ();
                  if (insns)
                    {
                      emit_insn_after (insns, insn);
                      if (BB_END (bb) == insn)
                        BB_END (bb) = PREV_INSN (following);
                      for (ni = insns; ni != following; ni = NEXT_INSN (ni))
                        {
                          set_block_for_insn (ni, bb);
                          df_insn_modify (df, bb, ni);
                        }
                    }
                  else
                    df_insn_modify (df, bb, insn);
                  emitted_spill_stores++;
                  spill_store_cost += bb->frequency + 1;
                  bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
                }
              else
                {
                  /* Otherwise ignore insns from adjust_address() above.  */
                  end_sequence ();
                }
            }
        }
      /* If we look at a load generated by the allocator, forget
         the last emitted slot, and additionally clear all slots
         overlapping its source (after all, we need it again).  */
      /* XXX If we emit the stack-ref directly into the using insn the
         following needs a change, because that is no new insn.  Preferably
         we would add some notes to the insn, what stackslots are needed
         for it.  */
      if (uid >= last_max_uid)
        {
          rtx set = single_set (insn);
          last_slot = NULL_RTX;
          /* If this was no simple set, give up, and forget everything.  */
          if (!set)
            slots = NULL;
          else
            {
              if (1 || GET_CODE (SET_SRC (set)) == MEM)
                delete_overlapping_slots (&slots, SET_SRC (set));
            }
        }
    }
}
/* Returns 1 if both colored webs have some hardregs in common, even if
   they are not the same width.  */

static int
spill_same_color_p (struct web *web1, struct web *web2)
{
  int c1, size1, c2, size2;
  if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
    return 0;
  if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color)
    return 0;

  size1 = web1->type == PRECOLORED
          ? 1 : hard_regno_nregs[c1][PSEUDO_REGNO_MODE (web1->regno)];
  size2 = web2->type == PRECOLORED
          ? 1 : hard_regno_nregs[c2][PSEUDO_REGNO_MODE (web2->regno)];
  if (c1 >= c2 + size2 || c2 >= c1 + size1)
    return 0;
  return 1;
}
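/* Example (illustrative, not from the original sources): a DImode web
   colored at hardreg 1 on a 32-bit target occupies regs 1 and 2
   (c1 = 1, size1 = 2); an SImode web colored at reg 2 (c2 = 2, size2 = 1)
   shares reg 2 with it, so spill_same_color_p returns 1 although the
   two webs have different widths.  */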
/* Given the set of live web IDs LIVE, returns nonzero, if any of WEB's
   subwebs (or WEB itself) is live.  */

static bool
is_partly_live_1 (sbitmap live, struct web *web)
{
  do
    if (TEST_BIT (live, web->id))
      return 1;
  while ((web = web->subreg_next));
  return 0;
}

/* Fast version in case WEB has no subwebs.  */
#define is_partly_live(live, web) ((!web->subreg_next) \
                                   ? TEST_BIT (live, web->id) \
                                   : is_partly_live_1 (live, web))
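/* For a web without subwebs the macro is just the plain bit test; for one
   with subwebs it walks the subreg_next chain, so e.g. a web whose low-part
   subweb alone is marked in LIVE still counts as partly live.  */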
/* Change the set of currently IN_USE colors according to
   WEB's color.  Either add those colors to the hardreg set (if ADD
   is nonzero), or remove them.  */

static void
update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
{
  int c, size;
  if ((c = alias (find_web_for_subweb (web))->color) < 0
      || c == an_unusable_color)
    return;
  size = hard_regno_nregs[c][GET_MODE (web->orig_x)];
  if (SUBWEB_P (web))
    {
      c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
                                SUBREG_BYTE (web->orig_x),
                                GET_MODE (web->orig_x));
    }
  else if (web->type == PRECOLORED)
    size = 1;
  if (add)
    while (size--)
      SET_HARD_REG_BIT (*in_use, c + size);
  else
    while (size--)
      CLEAR_HARD_REG_BIT (*in_use, c + size);
}
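/* Example (illustrative, not from the original sources): for a DImode web
   colored at hardreg 3 on a 32-bit target, size is 2, so a nonzero ADD
   sets bits 3 and 4 in *IN_USE; calling it again with ADD == 0 clears
   exactly those bits.  */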
/* Given a set of hardregs currently IN_USE and the color C of WEB,
   return -1 if WEB has no color, 1 if it has the unusable color,
   0 if one of its used hardregs is in use, and 1 otherwise.
   Generally, if WEB can't be left colorized, return 1.  */

static int
spill_is_free (HARD_REG_SET *in_use, struct web *web)
{
  int c, size;
  if ((c = alias (web)->color) < 0)
    return -1;
  if (c == an_unusable_color)
    return 1;
  size = web->type == PRECOLORED
         ? 1 : hard_regno_nregs[c][PSEUDO_REGNO_MODE (web->regno)];
  while (size--)
    if (TEST_HARD_REG_BIT (*in_use, c + size))
      return 0;
  return 1;
}
/* Structure for passing between rewrite_program2() and emit_loads().  */
struct rewrite_info
{
  /* The web IDs which currently would need a reload.  These are
     currently live spilled webs, whose color was still free.  */
  bitmap need_reload;
  /* We need a scratch bitmap, but don't want to allocate one a zillion
     times.  */
  bitmap scratch;
  /* Web IDs of currently live webs.  These are the precise IDs,
     not just those of the superwebs.  If only one part is live, only
     that ID is placed here.  */
  sbitmap live;
  /* An array of webs, which currently need a load added.
     They will be emitted when seeing the first death.  */
  struct web **needed_loads;
  /* The current number of entries in needed_loads.  */
  int nl_size;
  /* The number of bits set in need_reload.  */
  int num_reloads;
  /* The current set of hardregs not available.  */
  HARD_REG_SET colors_in_use;
  /* Nonzero, if we just added some spill temps to need_reload or
     needed_loads.  In this case we don't wait for the next death
     to emit their loads.  */
  int any_spilltemps_spilled;
  /* Nonzero, if we currently need to emit the loads.  E.g. when we
     saw an insn containing deaths.  */
  int need_load;
};
/* The needed_loads list of RI contains some webs for which
   we add the actual load insns here.  They are added just before
   their last seen use.  NL_FIRST_RELOAD is the index of the first
   load which is a converted reload, all other entries are normal
   loads.  LAST_BLOCK_INSN is the last insn of the current basic block.  */

static void
emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
{
  int j;
  for (j = ri->nl_size; j;)
    {
      struct web *web = ri->needed_loads[--j];
      struct web *supweb;
      struct web *aweb;
      rtx ni, slot, reg;
      rtx before = NULL_RTX, after = NULL_RTX;
      basic_block bb;
      /* When spilltemps were spilled for the last insns, their
         loads already are emitted, which is noted by setting
         needed_loads[] for it to 0.  */
      if (!web)
        continue;
      supweb = find_web_for_subweb (web);
      if (supweb->regno >= max_normal_pseudo)
        abort ();
      /* Check for web being a spilltemp, if we only want to
         load spilltemps.  Also remember, that we emitted that
         load, which we don't need to do when we have a death,
         because then all of needed_loads[] is emptied.  */
      if (!ri->need_load)
        {
          if (!supweb->spill_temp)
            continue;
          else
            ri->needed_loads[j] = 0;
        }
      web->in_load = 0;
      /* The adding of reloads doesn't depend on liveness.  */
      if (j < nl_first_reload && !TEST_BIT (ri->live, web->id))
        continue;
      aweb = alias (supweb);
      aweb->changed = 1;
      start_sequence ();
      if (supweb->pattern)
        {
          /* XXX If we later allow non-constant sources for rematerialization
             we must also disallow coalescing _to_ rematerialized webs
             (at least then disallow spilling them, which we already ensure
             when flag_ra_break_aliases), or not take the pattern but a
             stackslot.  */
          if (aweb != supweb)
            abort ();
          slot = copy_rtx (supweb->pattern);
          reg = copy_rtx (supweb->orig_x);
          /* Sanity check.  orig_x should be a REG rtx, which should be
             shared over all RTL, so copy_rtx should have no effect.  */
          if (reg != supweb->orig_x)
            abort ();
        }
      else
        {
          allocate_spill_web (aweb);
          slot = aweb->stack_slot;

          /* If we don't copy the RTL there might be some SUBREG
             rtx shared in the next iteration although being in
             different webs, which leads to wrong code.  */
          reg = copy_rtx (web->orig_x);
          if (GET_CODE (reg) == SUBREG)
            /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
              (reg));*/
            slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot),
                                        SUBREG_BYTE (reg));
        }
      ra_emit_move_insn (reg, slot);
      ni = get_insns ();
      end_sequence ();
      before = web->last_use_insn;
      web->last_use_insn = NULL_RTX;
      if (!before)
        {
          if (JUMP_P (last_block_insn))
            before = last_block_insn;
          else
            after = last_block_insn;
        }
      if (after)
        {
          rtx foll = NEXT_INSN (after);
          bb = BLOCK_FOR_INSN (after);
          emit_insn_after (ni, after);
          if (BB_END (bb) == after)
            BB_END (bb) = PREV_INSN (foll);
          for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
            {
              set_block_for_insn (ni, bb);
              df_insn_modify (df, bb, ni);
            }
        }
      else
        {
          rtx prev = PREV_INSN (before);
          bb = BLOCK_FOR_INSN (before);
          emit_insn_before (ni, before);
          if (BB_HEAD (bb) == before)
            BB_HEAD (bb) = NEXT_INSN (prev);
          for (; ni != before; ni = NEXT_INSN (ni))
            {
              set_block_for_insn (ni, bb);
              df_insn_modify (df, bb, ni);
            }
        }
      if (supweb->pattern)
        {
          emitted_remat++;
          spill_remat_cost += bb->frequency + 1;
        }
      else
        {
          emitted_spill_loads++;
          spill_load_cost += bb->frequency + 1;
        }
      RESET_BIT (ri->live, web->id);
      /* In the special case documented above only emit the reloads and
         one load.  */
      if (ri->need_load == 2 && j < nl_first_reload)
        break;
    }
  ri->nl_size = j;
}
/* Given a set of reloads in RI, an array of NUM_REFS references (either
   uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
   (either use2web or def2web) convert some reloads to loads.
   This looks at the webs referenced, and how they change the set of
   available colors.  Now put all still live webs, which needed reloads,
   and whose colors aren't free anymore, on the needed_loads list.  */

static void
reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
                  unsigned int num_refs, struct web **ref2web)
{
  unsigned int n;
  int num_reloads = ri->num_reloads;
  for (n = 0; n < num_refs && num_reloads; n++)
    {
      struct web *web = ref2web[DF_REF_ID (refs[n])];
      struct web *supweb = find_web_for_subweb (web);
      int is_death;
      int j;
      /* Only emit reloads when entering their interference
         region.  A use of a spilled web never opens an
         interference region, independent of its color.  */
      if (alias (supweb)->type == SPILLED)
        continue;
      if (supweb->type == PRECOLORED
          && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
        continue;
      /* Note, that if web (and supweb) are DEFs, we already cleared
         the corresponding bits in live.  I.e. is_death becomes true, which
         is what we want here.  */
      is_death = !TEST_BIT (ri->live, supweb->id);
      is_death &= !TEST_BIT (ri->live, web->id);
      if (is_death)
        {
          int old_num_r = num_reloads;
          bitmap_clear (ri->scratch);
          EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j,
            {
              struct web *web2 = ID2WEB (j);
              struct web *aweb2 = alias (find_web_for_subweb (web2));
              if (spill_is_free (&(ri->colors_in_use), aweb2) == 0)
                abort ();
              if (spill_same_color_p (supweb, aweb2)
                  /* && interfere (web, web2) */)
                {
                  if (!web2->in_load)
                    {
                      ri->needed_loads[ri->nl_size++] = web2;
                      web2->in_load = 1;
                    }
                  bitmap_set_bit (ri->scratch, j);
                  num_reloads--;
                }
            });
          if (num_reloads != old_num_r)
            bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch,
                              BITMAP_AND_COMPL);
        }
    }
  ri->num_reloads = num_reloads;
}
/* This adds loads for spilled webs to the program.  It uses a kind of
   interference region spilling.  If flag_ra_ir_spilling is zero it
   only uses improved chaitin spilling (adding loads only at insns
   containing deaths).  */

static void
rewrite_program2 (bitmap new_deaths)
{
  basic_block bb = NULL;
  int nl_first_reload;
  struct rewrite_info ri;
  rtx insn;
  ri.needed_loads = xmalloc (num_webs * sizeof (struct web *));
  ri.need_reload = BITMAP_XMALLOC ();
  ri.scratch = BITMAP_XMALLOC ();
  ri.live = sbitmap_alloc (num_webs);
  ri.nl_size = 0;
  ri.num_reloads = 0;
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    {
      basic_block last_bb = NULL;
      rtx last_block_insn;
      int i, j;
      if (!INSN_P (insn))
        insn = prev_real_insn (insn);
      while (insn && !(bb = BLOCK_FOR_INSN (insn)))
        insn = prev_real_insn (insn);
      if (!insn)
        break;
      i = bb->index + 2;
      last_block_insn = insn;

      sbitmap_zero (ri.live);
      CLEAR_HARD_REG_SET (ri.colors_in_use);
      EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j,
        {
          struct web *web = use2web[j];
          struct web *aweb = alias (find_web_for_subweb (web));
          /* A web is only live at end, if it isn't spilled.  If we wouldn't
             check this, the last uses of spilled web per basic block
             wouldn't be detected as deaths, although they are in the final
             code.  This would lead to cumulating many loads without need,
             only increasing register pressure.  */
          /* XXX do add also spilled webs which got a color for IR spilling.
             Remember to not add to colors_in_use in that case.  */
          if (aweb->type != SPILLED /*|| aweb->color >= 0*/)
            {
              SET_BIT (ri.live, web->id);
              if (aweb->type != SPILLED)
                update_spill_colors (&(ri.colors_in_use), web, 1);
            }
        });

      bitmap_clear (ri.need_reload);
      ri.num_reloads = 0;
      ri.any_spilltemps_spilled = 0;
      if (flag_ra_ir_spilling)
        {
          struct dlist *d;
          int pass;
          /* XXX If we don't add spilled nodes into live above, the following
             becomes an empty loop.  */
          for (pass = 0; pass < 2; pass++)
            for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
              {
                struct web *web = DLIST_WEB (d);
                struct web *aweb = alias (web);
                if (aweb->type != SPILLED)
                  continue;
                if (is_partly_live (ri.live, web)
                    && spill_is_free (&(ri.colors_in_use), web) > 0)
                  {
                    ri.num_reloads++;
                    bitmap_set_bit (ri.need_reload, web->id);
                    /* Last using insn is somewhere in another block.  */
                    web->last_use_insn = NULL_RTX;
                  }
              }
        }

      last_bb = bb;
      for (; insn; insn = PREV_INSN (insn))
        {
          struct ra_insn_info info;
          unsigned int n;

          memset (&info, 0, sizeof info);

          if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
            {
              int index = BLOCK_FOR_INSN (insn)->index + 2;
              EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j,
                {
                  struct web *web = use2web[j];
                  struct web *aweb = alias (find_web_for_subweb (web));
                  if (aweb->type != SPILLED)
                    {
                      SET_BIT (ri.live, web->id);
                      update_spill_colors (&(ri.colors_in_use), web, 1);
                    }
                });
              bitmap_clear (ri.scratch);
              EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
                {
                  struct web *web2 = ID2WEB (j);
                  struct web *supweb2 = find_web_for_subweb (web2);
                  struct web *aweb2 = alias (supweb2);
                  if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0)
                    {
                      if (!web2->in_load)
                        {
                          ri.needed_loads[ri.nl_size++] = web2;
                          web2->in_load = 1;
                        }
                      bitmap_set_bit (ri.scratch, j);
                      ri.num_reloads--;
                    }
                });
              bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
                                BITMAP_AND_COMPL);
              last_bb = BLOCK_FOR_INSN (insn);
              last_block_insn = insn;
              if (!INSN_P (last_block_insn))
                last_block_insn = prev_real_insn (last_block_insn);
            }

          ri.need_load = 0;
          if (INSN_P (insn))
            info = insn_df[INSN_UID (insn)];

          if (INSN_P (insn))
            for (n = 0; n < info.num_defs; n++)
              {
                struct ref *ref = info.defs[n];
                struct web *web = def2web[DF_REF_ID (ref)];
                struct web *supweb = find_web_for_subweb (web);
                int is_non_def = 0;
                unsigned int n2;

                supweb = find_web_for_subweb (web);
                /* Webs which are defined here, but also used in the same insn
                   are rmw webs, or this use isn't a death because of looping
                   constructs.  In neither case does this def make its
                   resources available.  Reloads for it are still needed,
                   it's still live and its colors don't become free.  */
                for (n2 = 0; n2 < info.num_uses; n2++)
                  {
                    struct web *web2 = use2web[DF_REF_ID (info.uses[n2])];
                    if (supweb == find_web_for_subweb (web2))
                      {
                        is_non_def = 1;
                        break;
                      }
                  }
                if (is_non_def)
                  continue;

                if (!is_partly_live (ri.live, supweb))
                  bitmap_set_bit (useless_defs, DF_REF_ID (ref));

                RESET_BIT (ri.live, web->id);
                if (bitmap_bit_p (ri.need_reload, web->id))
                  {
                    ri.num_reloads--;
                    bitmap_clear_bit (ri.need_reload, web->id);
                  }
                if (web != supweb)
                  {
                    /* XXX subwebs aren't precisely tracked here.  We have
                       everything we need (inverse webs), but the code isn't
                       yet written.  We need to make all completely
                       overlapping web parts non-live here.  */
                    /* If by luck now the whole web isn't live anymore, no
                       reloads for it are needed.  */
                    if (!is_partly_live (ri.live, supweb)
                        && bitmap_bit_p (ri.need_reload, supweb->id))
                      {
                        ri.num_reloads--;
                        bitmap_clear_bit (ri.need_reload, supweb->id);
                      }
                  }
                else
                  {
                    struct web *sweb;
                    /* If the whole web is defined here, no parts of it are
                       live anymore and no reloads are needed for them.  */
                    for (sweb = supweb->subreg_next; sweb;
                         sweb = sweb->subreg_next)
                      {
                        RESET_BIT (ri.live, sweb->id);
                        if (bitmap_bit_p (ri.need_reload, sweb->id))
                          {
                            ri.num_reloads--;
                            bitmap_clear_bit (ri.need_reload, sweb->id);
                          }
                      }
                  }
                if (alias (supweb)->type != SPILLED)
                  update_spill_colors (&(ri.colors_in_use), web, 0);
              }

          nl_first_reload = ri.nl_size;

          /* CALL_INSNs are not really deaths, but still more registers
             are free after a call, than before.
             XXX Note, that sometimes reload barfs when we emit insns between
             a call and the insn which copies the return register into a
             pseudo.  */
          if (GET_CODE (insn) == CALL_INSN)
            ri.need_load = 1;
          else if (INSN_P (insn))
            for (n = 0; n < info.num_uses; n++)
              {
                struct web *web = use2web[DF_REF_ID (info.uses[n])];
                struct web *supweb = find_web_for_subweb (web);
                int is_death;
                if (supweb->type == PRECOLORED
                    && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
                  continue;
                is_death = !TEST_BIT (ri.live, supweb->id);
                is_death &= !TEST_BIT (ri.live, web->id);
                if (is_death)
                  {
                    ri.need_load = 1;
                    bitmap_set_bit (new_deaths, INSN_UID (insn));
                    break;
                  }
              }

          if (INSN_P (insn) && ri.num_reloads)
            {
              int old_num_reloads = ri.num_reloads;
              reloads_to_loads (&ri, info.uses, info.num_uses, use2web);

              /* If this insn sets a pseudo, which isn't used later
                 (i.e. wasn't live before) it is a dead store.  We need
                 to emit all reloads which have the same color as this def.
                 We don't need to check for non-liveness here to detect
                 the deadness (it anyway is too late, as we already cleared
                 the liveness in the first loop over the defs), because if it
                 _would_ be live here, no reload could have that color, as
                 they would already have been converted to a load.  */
              if (ri.num_reloads)
                reloads_to_loads (&ri, info.defs, info.num_defs, def2web);
              if (ri.num_reloads != old_num_reloads && !ri.need_load)
                ri.need_load = 1;
            }

          if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled))
            emit_loads (&ri, nl_first_reload, last_block_insn);

          if (INSN_P (insn) && flag_ra_ir_spilling)
            for (n = 0; n < info.num_uses; n++)
              {
                struct web *web = use2web[DF_REF_ID (info.uses[n])];
                struct web *aweb = alias (find_web_for_subweb (web));
                if (aweb->type != SPILLED)
                  update_spill_colors (&(ri.colors_in_use), web, 1);
              }

          ri.any_spilltemps_spilled = 0;
          if (INSN_P (insn))
            for (n = 0; n < info.num_uses; n++)
              {
                struct web *web = use2web[DF_REF_ID (info.uses[n])];
                struct web *supweb = find_web_for_subweb (web);
                struct web *aweb = alias (supweb);
                SET_BIT (ri.live, web->id);
                if (aweb->type != SPILLED)
                  continue;
                if (supweb->spill_temp)
                  ri.any_spilltemps_spilled = 1;
                web->last_use_insn = insn;
                if (!web->in_load)
                  {
                    if (spill_is_free (&(ri.colors_in_use), aweb) <= 0
                        || !flag_ra_ir_spilling)
                      {
                        ri.needed_loads[ri.nl_size++] = web;
                        web->in_load = 1;
                      }
                    else if (!bitmap_bit_p (ri.need_reload, web->id))
                      {
                        bitmap_set_bit (ri.need_reload, web->id);
                        ri.num_reloads++;
                      }
                  }
              }

          if (GET_CODE (insn) == CODE_LABEL)
            break;
        }

      nl_first_reload = ri.nl_size;
      if (ri.num_reloads)
        {
          int in_ir = 0;
          edge e;
          int num = 0;
          HARD_REG_SET cum_colors, colors;
          CLEAR_HARD_REG_SET (cum_colors);
          for (e = bb->pred; e && num < 5; e = e->pred_next, num++)
            {
              CLEAR_HARD_REG_SET (colors);
              EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j,
                {
                  struct web *web = use2web[j];
                  struct web *aweb = alias (find_web_for_subweb (web));
                  if (aweb->type != SPILLED)
                    update_spill_colors (&colors, web, 1);
                });
              IOR_HARD_REG_SET (cum_colors, colors);
            }
          if (num == 5)
            in_ir = 1;

          bitmap_clear (ri.scratch);
          EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
            {
              struct web *web2 = ID2WEB (j);
              struct web *supweb2 = find_web_for_subweb (web2);
              struct web *aweb2 = alias (supweb2);
              /* block entry is IR boundary for aweb2?
                 Currently more some tries for good conditions.  */
              if (((ra_pass > 0 || supweb2->target_of_spilled_move)
                   && (1 || in_ir
                       || spill_is_free (&cum_colors, aweb2) <= 0))
                  || (ra_pass == 1
                      && (in_ir
                          || spill_is_free (&cum_colors, aweb2) <= 0)))
                {
                  if (!web2->in_load)
                    {
                      ri.needed_loads[ri.nl_size++] = web2;
                      web2->in_load = 1;
                    }
                  bitmap_set_bit (ri.scratch, j);
                  ri.num_reloads--;
                }
            });
          bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
                            BITMAP_AND_COMPL);
        }

      ri.need_load = 1;
      emit_loads (&ri, nl_first_reload, last_block_insn);
      if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/)
        abort ();
      if (!insn)
        break;
    }
  free (ri.needed_loads);
  sbitmap_free (ri.live);
  BITMAP_XFREE (ri.scratch);
  BITMAP_XFREE (ri.need_reload);
}
/* WEB is a web conflicting with a spilled one.  Prepare it
   to be able to rescan it in the next pass.  Mark all its uses
   for checking, and clear some members of their web parts
   (of defs and uses).  Notably don't clear the uplink.  We don't
   change the layout of this web, just its conflicts.
   Also remember all IDs of its uses in USES_AS_BITMAP.  */

static void
mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap)
{
  unsigned int i;
  for (i = 0; i < web->num_uses; i++)
    {
      unsigned int id = DF_REF_ID (web->uses[i]);
      SET_BIT (last_check_uses, id);
      bitmap_set_bit (uses_as_bitmap, id);
      web_parts[df->def_id + id].spanned_deaths = 0;
      web_parts[df->def_id + id].crosses_call = 0;
    }
  for (i = 0; i < web->num_defs; i++)
    {
      unsigned int id = DF_REF_ID (web->defs[i]);
      web_parts[id].spanned_deaths = 0;
      web_parts[id].crosses_call = 0;
    }
}
/* The last step of the spill phase is to set up the structures for
   incrementally rebuilding the interference graph.  We break up
   the web part structure of all spilled webs, mark their uses for
   rechecking, look at their neighbors, and clean up some global
   information, which we will rebuild.  */

static void
detect_web_parts_to_rebuild (void)
{
  bitmap uses_as_bitmap;
  unsigned int i, pass;
  struct dlist *d;
  sbitmap already_webs = sbitmap_alloc (num_webs);

  uses_as_bitmap = BITMAP_XMALLOC ();
  if (last_check_uses)
    sbitmap_free (last_check_uses);
  last_check_uses = sbitmap_alloc (df->use_id);
  sbitmap_zero (last_check_uses);
  sbitmap_zero (already_webs);
  /* We need to recheck all uses of all webs involved in spilling (and the
     uses added by spill insns, but those are not analyzed yet).
     Those are the spilled webs themselves, webs coalesced to spilled ones,
     and webs conflicting with any of them.  */
  for (pass = 0; pass < 2; pass++)
    for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
      {
        struct web *web = DLIST_WEB (d);
        struct conflict_link *wl;
        unsigned int j;
        /* This check is only needed for coalesced nodes, but hey.  */
        if (alias (web)->type != SPILLED)
          continue;

        /* For the spilled web itself we also need to clear its
           uplink, to be able to rebuild smaller webs.  After all
           spilling has split the web.  */
        for (i = 0; i < web->num_uses; i++)
          {
            unsigned int id = DF_REF_ID (web->uses[i]);
            SET_BIT (last_check_uses, id);
            bitmap_set_bit (uses_as_bitmap, id);
            web_parts[df->def_id + id].uplink = NULL;
            web_parts[df->def_id + id].spanned_deaths = 0;
            web_parts[df->def_id + id].crosses_call = 0;
          }
        for (i = 0; i < web->num_defs; i++)
          {
            unsigned int id = DF_REF_ID (web->defs[i]);
            web_parts[id].uplink = NULL;
            web_parts[id].spanned_deaths = 0;
            web_parts[id].crosses_call = 0;
          }

        /* Now look at all neighbors of this spilled web.  */
        if (web->have_orig_conflicts)
          wl = web->orig_conflict_list;
        else
          wl = web->conflict_list;
        for (; wl; wl = wl->next)
          {
            if (TEST_BIT (already_webs, wl->t->id))
              continue;
            SET_BIT (already_webs, wl->t->id);
            mark_refs_for_checking (wl->t, uses_as_bitmap);
          }
        EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
          {
            struct web *web2 = ID2WEB (j);
            if (TEST_BIT (already_webs, web2->id))
              continue;
            SET_BIT (already_webs, web2->id);
            mark_refs_for_checking (web2, uses_as_bitmap);
          });
      }

  /* We also recheck unconditionally all uses of any hardregs.  This means
     we _can_ delete all these uses from the live_at_end[] bitmaps.
     And because we sometimes delete insns referring to hardregs (when
     they became useless because they set up a rematerializable pseudo, which
     then was rematerialized), some of those uses will go away with the next
     df_analyze().  This means we even _must_ delete those uses from
     the live_at_end[] bitmaps.  For simplicity we simply delete
     all of them.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      struct df_link *link;
      for (link = df->regs[i].uses; link; link = link->next)
        if (link->ref)
          bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
    }

  /* The information in live_at_end[] will be rebuilt for all uses
     we recheck, so clear it here (the uses of spilled webs might
     indeed not become members of it again).  */
  for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
    bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap,
                      BITMAP_AND_COMPL);

  if (dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
    {
      ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
      dump_sbitmap_file (dump_file, last_check_uses);
    }
  sbitmap_free (already_webs);
  BITMAP_XFREE (uses_as_bitmap);
}
/* Statistics about deleted insns, which are useless now.  */
static unsigned int deleted_def_insns;
static unsigned HOST_WIDE_INT deleted_def_cost;

/* In rewrite_program2() we noticed when a certain insn set a pseudo
   which wasn't live.  Try to delete all those insns.  */

static void
delete_useless_defs (void)
{
  unsigned int i;
  /* If the insn only sets the def without any side effect (besides
     clobbers or uses), we can delete it.  single_set() also tests
     for INSN_P(insn).  */
  EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i,
    {
      rtx insn = DF_REF_INSN (df->defs[i]);
      rtx set = single_set (insn);
      struct web *web = find_web_for_subweb (def2web[i]);
      if (set && web->type == SPILLED && web->stack_slot == NULL)
        {
          deleted_def_insns++;
          deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
          PUT_CODE (insn, NOTE);
          NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
          df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
        }
    });
}
/* Look for spilled webs, on whose behalf no insns were emitted.
   We invert the changed flag of the webs, so after this function
   a nonzero changed flag means, that this web was not spillable (at least
   in this pass).  */

static void
detect_non_changed_webs (void)
{
  struct dlist *d, *d_next;
  for (d = WEBS(SPILLED); d; d = d_next)
    {
      struct web *web = DLIST_WEB (d);
      d_next = d->next;
      if (!web->changed)
        {
          ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
                        web->id);
          remove_web_from_list (web);
          put_web (web, COLORED);
          web->changed = 1;
        }
      else
        web->changed = 0;
      /* From now on web->changed is used as the opposite flag.
         I.e. colored webs, which have changed set were formerly
         spilled webs for which no insns were emitted.  */
    }
}
/* Before spilling we clear the changed flags for all spilled webs.  */

static void
reset_changed_flag (void)
{
  struct dlist *d;
  for (d = WEBS(SPILLED); d; d = d->next)
    DLIST_WEB(d)->changed = 0;
}
/* The toplevel function for this file.  Given a colorized graph,
   and lists of spilled, coalesced and colored webs, we add some
   spill code.  This also sets up the structures for incrementally
   building the interference graph in the next pass.  */

void
actual_spill (void)
{
  int i;
  bitmap new_deaths = BITMAP_XMALLOC ();
  reset_changed_flag ();
  spill_coalprop ();
  choose_spill_colors ();
  useless_defs = BITMAP_XMALLOC ();
  if (flag_ra_improved_spilling)
    rewrite_program2 (new_deaths);
  else
    rewrite_program (new_deaths);
  insert_stores (new_deaths);
  delete_useless_defs ();
  BITMAP_XFREE (useless_defs);
  sbitmap_free (insns_with_deaths);
  insns_with_deaths = sbitmap_alloc (get_max_uid ());
  death_insns_max_uid = get_max_uid ();
  sbitmap_zero (insns_with_deaths);
  EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i,
    { SET_BIT (insns_with_deaths, i);});
  detect_non_changed_webs ();
  detect_web_parts_to_rebuild ();
  BITMAP_XFREE (new_deaths);
}
/* A bitmap of pseudo reg numbers which are coalesced directly
   to a hardreg.  Set in emit_colors(), used and freed in
   remove_suspicious_death_notes().  */
static bitmap regnos_coalesced_to_hardregs;

/* Create new pseudos for each web we colored, change insns to
   use those pseudos and set up ra_reg_renumber.  */

void
emit_colors (struct df *df)
{
  unsigned int i;
  int si;
  struct web *web;
  int old_max_regno = max_reg_num ();
  regset old_regs;
  basic_block bb;

  /* This bitmap is freed in remove_suspicious_death_notes(),
     which is also the user of it.  */
  regnos_coalesced_to_hardregs = BITMAP_XMALLOC ();
  /* First create the (REG xx) rtx's for all webs, as we need to know
     the number, to make sure flow has enough memory for them in the
     various tables.  */
  for (i = 0; i < num_webs - num_subwebs; i++)
    {
      web = ID2WEB (i);
      if (web->type != COLORED && web->type != COALESCED)
        continue;
      if (web->type == COALESCED && alias (web)->type == COLORED)
        continue;
      if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER)
        abort ();

      if (web->regno >= max_normal_pseudo)
        {
          rtx place;
          if (web->color == an_unusable_color)
            {
              unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno);
              unsigned int total_size = MAX (inherent_size, 0);
              place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
                                          total_size,
                                          inherent_size == total_size ? 0 : -1);
              RTX_UNCHANGING_P (place) =
                  RTX_UNCHANGING_P (regno_reg_rtx[web->regno]);
              set_mem_alias_set (place, new_alias_set ());
            }
          else
            {
              place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
            }
          web->reg_rtx = place;
        }
      else
        {
          /* Special case for i386 'fix_truncdi_nomemory' insn.
             We must choose mode from insns not from PSEUDO_REGNO_MODE.
             Actual only for clobbered register.  */
          if (web->num_uses == 0 && web->num_defs == 1)
            web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0])));
          else
            web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
          /* Remember the different parts directly coalesced to a hardreg.  */
          if (web->type == COALESCED)
            bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx));
        }
    }
  ra_max_regno = max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, FALSE);
  ra_reg_renumber = xmalloc (max_regno * sizeof (short));
  for (si = 0; si < max_regno; si++)
    ra_reg_renumber[si] = -1;

  /* Then go through all references, and replace them by a new
     pseudoreg for each web.  All uses.  */
  /* XXX
     Beware: The order of replacements (first uses, then defs) matters only
     for read-mod-write insns, where the RTL expression for the REG is
     shared between def and use.  For normal rmw insns we connected all such
     webs, i.e. both the use and the def (which are the same memory)
     there get the same new pseudo-reg, so order would not matter.
     _However_ we did not connect webs where the read cycle was an
     uninitialized read.  If we now would first replace the def reference
     and then the use ref, we would initialize it with a REG rtx, which
     never gets initialized, and, even more wrong, which would overwrite
     the definition of the other REG rtx.  So we must replace the defs last.
   */
  for (i = 0; i < df->use_id; i++)
    if (df->uses[i])
      {
        regset rs = DF_REF_BB (df->uses[i])->global_live_at_start;
        rtx regrtx;
        web = use2web[i];
        web = find_web_for_subweb (web);
        if (web->type != COLORED && web->type != COALESCED)
          continue;
        regrtx = alias (web)->reg_rtx;
        if (!regrtx)
          regrtx = web->reg_rtx;
        *DF_REF_REAL_LOC (df->uses[i]) = regrtx;
        if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
          {
            /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
            SET_REGNO_REG_SET (rs, REGNO (regrtx));
          }
      }

  /* And all defs.  */
  for (i = 0; i < df->def_id; i++)
    {
      regset rs;
      rtx regrtx;
      if (!df->defs[i])
        continue;
      rs = DF_REF_BB (df->defs[i])->global_live_at_start;
      web = def2web[i];
      web = find_web_for_subweb (web);
      if (web->type != COLORED && web->type != COALESCED)
        continue;
      regrtx = alias (web)->reg_rtx;
      if (!regrtx)
        regrtx = web->reg_rtx;
      *DF_REF_REAL_LOC (df->defs[i]) = regrtx;
      if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
        {
          /* Don't simply clear the current regno, as it might be
             replaced by two webs.  */
          /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
          SET_REGNO_REG_SET (rs, REGNO (regrtx));
        }
    }

  /* And now set up the ra_reg_renumber array for reload with all the new
     pseudo-regs.  */
  for (i = 0; i < num_webs - num_subwebs; i++)
    {
      web = ID2WEB (i);
      if (web->reg_rtx && REG_P (web->reg_rtx))
        {
          int r = REGNO (web->reg_rtx);
          ra_reg_renumber[r] = web->color;
          ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n",
                        r, web->id, ra_reg_renumber[r]);
        }
    }

  old_regs = BITMAP_XMALLOC ();
  for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++)
    SET_REGNO_REG_SET (old_regs, si);
  FOR_EACH_BB (bb)
    {
      AND_COMPL_REG_SET (bb->global_live_at_start, old_regs);
      AND_COMPL_REG_SET (bb->global_live_at_end, old_regs);
    }
  BITMAP_XFREE (old_regs);
}
/* Delete some coalesced moves from the insn stream.  */

void
delete_moves (void)
{
  struct move_list *ml;
  struct web *s, *t;
  /* XXX Beware: We normally would test here each copy insn, if
     source and target got the same color (either by coalescing or by pure
     luck), and then delete it.
     This will currently not work.  One problem is, that we don't color
     the regs ourself, but instead defer to reload.  So the colorization
     is only a kind of suggestion, which reload doesn't have to follow.
     For webs which are coalesced to a normal colored web, we only have one
     new pseudo, so in this case we indeed can delete copy insns involving
     those (because even if reload colors them different from our suggestion,
     it still has to color them the same, as only one pseudo exists).  But for
     webs coalesced to precolored ones, we have not a single pseudo, but
     instead one for each coalesced web.  This means, that we can't delete
     copy insns, where source and target are webs coalesced to precolored
     ones, because then the connection between both webs is destroyed.  Note
     that this not only means copy insns, where one side is the precolored one
     itself, but also those between webs which are coalesced to one color.
     Also because of reload we can't delete copy insns which involve any
     precolored web at all.  These often have also special meaning (e.g.
     copying a return value of a call to a pseudo, or copying pseudo to the
     return register), and the deletion would confuse reload in thinking the
     pseudo isn't needed.  One of these days reload will go away and we can
     do everything we want.
     In effect because of the later reload, we can't base our deletion on the
     colors themselves, but instead need to base them on the newly created
     pseudos.  */
  for (ml = wl_moves; ml; ml = ml->next)
    /* The real condition we would ideally use is: s->color == t->color.
       Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
       we want to prevent deletion of "special" copies.  */
    if (ml->move
        && (s = alias (ml->move->source_web))->reg_rtx
            == (t = alias (ml->move->target_web))->reg_rtx
        && s->type != PRECOLORED && t->type != PRECOLORED)
      {
        basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
        df_insn_delete (df, bb, ml->move->insn);
        deleted_move_insns++;
        deleted_move_cost += bb->frequency + 1;
      }
}
/* Due to reasons documented elsewhere we create different pseudos
   for all webs coalesced to hardregs.  For these parts life_analysis()
   might have added REG_DEAD notes without considering, that only this part
   but not the whole coalesced web dies.  The RTL is correct, there is no
   coalescing yet.  But if later reload's alter_reg() substitutes the
   hardreg into the REG rtx it looks like that particular hardreg dies here,
   although (due to coalescing) it still is live.  This might make different
   places of reload think, it can use that hardreg for reload regs,
   accidentally overwriting it.  So we need to remove those REG_DEAD notes.
   (Or better teach life_analysis() and reload about our coalescing, but
   that comes later) Bah.  */

void
remove_suspicious_death_notes (void)
{
  rtx insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        rtx *pnote = &REG_NOTES (insn);
        while (*pnote)
          {
            rtx note = *pnote;
            if ((REG_NOTE_KIND (note) == REG_DEAD
                 || REG_NOTE_KIND (note) == REG_UNUSED)
                && (REG_P (XEXP (note, 0))
                    && bitmap_bit_p (regnos_coalesced_to_hardregs,
                                     REGNO (XEXP (note, 0)))))
              *pnote = XEXP (note, 1);
            else
              pnote = &XEXP (*pnote, 1);
          }
      }
  BITMAP_XFREE (regnos_coalesced_to_hardregs);
  regnos_coalesced_to_hardregs = NULL;
}
/* Allocate space for max_reg_num() pseudo registers, and
   fill reg_renumber[] from ra_reg_renumber[].  If FREE_IT
   is nonzero, also free ra_reg_renumber and reset ra_max_regno.  */

void
setup_renumber (int free_it)
{
  int i;
  max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, TRUE);
  for (i = 0; i < max_regno; i++)
    reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
  if (free_it)
    {
      free (ra_reg_renumber);
      ra_reg_renumber = NULL;
      ra_max_regno = 0;
    }
}
/* Dump the costs and savings due to spilling, i.e. of added spill insns
   and removed moves or useless defs.  */

void
dump_cost (unsigned int level)
{
  ra_debug_msg (level, "Instructions for spilling\n added:\n");
  ra_debug_msg (level, "  loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                emitted_spill_loads, spill_load_cost);
  ra_debug_msg (level, "  stores=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                emitted_spill_stores, spill_store_cost);
  ra_debug_msg (level, "  remat =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                emitted_remat, spill_remat_cost);
  ra_debug_msg (level, " removed:\n  moves =%d cost="
                HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                deleted_move_insns, deleted_move_cost);
  ra_debug_msg (level, "  others=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
                deleted_def_insns, deleted_def_cost);
}
/* Initialization of the rewrite phase.  */

void
ra_rewrite_init (void)
{
  emitted_spill_loads = 0;
  emitted_spill_stores = 0;
  emitted_remat = 0;
  spill_load_cost = 0;
  spill_store_cost = 0;
  spill_remat_cost = 0;
  deleted_move_insns = 0;
  deleted_move_cost = 0;
  deleted_def_insns = 0;
  deleted_def_cost = 0;
}
/*
 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/