1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23 #include "coretypes.h"
29 #include "hard-reg-set.h"
30 #include "basic-block.h"
36 #include "insn-config.h"
39 /* This file is part of the graph coloring register allocator, and
40 contains the functions to change the insn stream. I.e. it adds
41 spill code, rewrites insns to use the new registers after
42 coloring and deletes coalesced moves. */
47 static void spill_coalescing
PARAMS ((sbitmap
, sbitmap
));
48 static unsigned HOST_WIDE_INT spill_prop_savings
PARAMS ((struct web
*,
50 static void spill_prop_insert
PARAMS ((struct web
*, sbitmap
, sbitmap
));
51 static int spill_propagation
PARAMS ((sbitmap
, sbitmap
, sbitmap
));
52 static void spill_coalprop
PARAMS ((void));
53 static void allocate_spill_web
PARAMS ((struct web
*));
54 static void choose_spill_colors
PARAMS ((void));
55 static void rewrite_program
PARAMS ((bitmap
));
56 static void remember_slot
PARAMS ((struct rtx_list
**, rtx
));
57 static int slots_overlap_p
PARAMS ((rtx
, rtx
));
58 static void delete_overlapping_slots
PARAMS ((struct rtx_list
**, rtx
));
59 static int slot_member_p
PARAMS ((struct rtx_list
*, rtx
));
60 static void insert_stores
PARAMS ((bitmap
));
61 static int spill_same_color_p
PARAMS ((struct web
*, struct web
*));
62 static bool is_partly_live_1
PARAMS ((sbitmap
, struct web
*));
63 static void update_spill_colors
PARAMS ((HARD_REG_SET
*, struct web
*, int));
64 static int spill_is_free
PARAMS ((HARD_REG_SET
*, struct web
*));
65 static void emit_loads
PARAMS ((struct rewrite_info
*, int, rtx
));
66 static void reloads_to_loads
PARAMS ((struct rewrite_info
*, struct ref
**,
67 unsigned int, struct web
**));
68 static void rewrite_program2
PARAMS ((bitmap
));
69 static void mark_refs_for_checking
PARAMS ((struct web
*, bitmap
));
70 static void detect_web_parts_to_rebuild
PARAMS ((void));
71 static void delete_useless_defs
PARAMS ((void));
72 static void detect_non_changed_webs
PARAMS ((void));
73 static void reset_changed_flag
PARAMS ((void));
75 /* For tracking some statistics, we count the number (and cost)
76 of deleted move insns. */
/* Count of move insns removed by spill coalescing (incremented in
   spill_coalescing() when a move between coalesced spilled webs is
   turned into a deleted note).  */
77 static unsigned int deleted_move_insns
;
/* Accumulated cost of those deleted moves; each deleted move adds
   BLOCK_FOR_INSN (insn)->frequency + 1, so hotter blocks weigh more.  */
78 static unsigned HOST_WIDE_INT deleted_move_cost
;
80 /* This is the spill coalescing phase. In SPILLED the IDs of all
81 already spilled webs are noted. In COALESCED the IDs of webs still
82 to check for coalescing. This tries to coalesce two webs, which were
83 spilled, are connected by a move, and don't conflict. Greatly
84 reduces memory shuffling. */
87 spill_coalescing (coalesce
, spilled
)
88 sbitmap coalesce
, spilled
;
92 for (ml
= wl_moves
; ml
; ml
= ml
->next
)
93 if ((m
= ml
->move
) != NULL
)
95 struct web
*s
= alias (m
->source_web
);
96 struct web
*t
= alias (m
->target_web
);
97 if ((TEST_BIT (spilled
, s
->id
) && TEST_BIT (coalesce
, t
->id
))
98 || (TEST_BIT (spilled
, t
->id
) && TEST_BIT (coalesce
, s
->id
)))
100 struct conflict_link
*wl
;
101 if (TEST_BIT (sup_igraph
, s
->id
* num_webs
+ t
->id
)
102 || TEST_BIT (sup_igraph
, t
->id
* num_webs
+ s
->id
)
103 || s
->pattern
|| t
->pattern
)
106 deleted_move_insns
++;
107 deleted_move_cost
+= BLOCK_FOR_INSN (m
->insn
)->frequency
+ 1;
108 PUT_CODE (m
->insn
, NOTE
);
109 NOTE_LINE_NUMBER (m
->insn
) = NOTE_INSN_DELETED
;
110 df_insn_modify (df
, BLOCK_FOR_INSN (m
->insn
), m
->insn
);
112 m
->target_web
->target_of_spilled_move
= 1;
114 /* May be, already coalesced due to a former move. */
116 /* Merge the nodes S and T in the I-graph. Beware: the merging
117 of conflicts relies on the fact, that in the conflict list
118 of T all of it's conflicts are noted. This is currently not
119 the case if T would be the target of a coalesced web, because
120 then (in combine () above) only those conflicts were noted in
121 T from the web which was coalesced into T, which at the time
122 of combine() were not already on the SELECT stack or were
123 itself coalesced to something other. */
124 if (t
->type
!= SPILLED
|| s
->type
!= SPILLED
)
126 remove_list (t
->dlink
, &WEBS(SPILLED
));
127 put_web (t
, COALESCED
);
132 for (wl
= t
->conflict_list
; wl
; wl
= wl
->next
)
134 struct web
*pweb
= wl
->t
;
136 record_conflict (s
, pweb
);
139 struct sub_conflict
*sl
;
140 for (sl
= wl
->sub
; sl
; sl
= sl
->next
)
142 struct web
*sweb
= NULL
;
143 if (SUBWEB_P (sl
->s
))
144 sweb
= find_subweb (s
, sl
->s
->orig_x
);
147 record_conflict (sweb
, sl
->t
);
150 /* No decrement_degree here, because we already have colored
151 the graph, and don't want to insert pweb into any other
153 pweb
->num_conflicts
-= 1 + t
->add_hardregs
;
159 /* Returns the probable saving of coalescing WEB with webs from
160 SPILLED, in terms of removed move insn cost. */
162 static unsigned HOST_WIDE_INT
163 spill_prop_savings (web
, spilled
)
167 unsigned HOST_WIDE_INT savings
= 0;
168 struct move_list
*ml
;
173 cost
= 1 + MEMORY_MOVE_COST (GET_MODE (web
->orig_x
), web
->regclass
, 1);
174 cost
+= 1 + MEMORY_MOVE_COST (GET_MODE (web
->orig_x
), web
->regclass
, 0);
175 for (ml
= wl_moves
; ml
; ml
= ml
->next
)
176 if ((m
= ml
->move
) != NULL
)
178 struct web
*s
= alias (m
->source_web
);
179 struct web
*t
= alias (m
->target_web
);
186 if (s
!= web
|| !TEST_BIT (spilled
, t
->id
) || t
->pattern
187 || TEST_BIT (sup_igraph
, s
->id
* num_webs
+ t
->id
)
188 || TEST_BIT (sup_igraph
, t
->id
* num_webs
+ s
->id
))
190 savings
+= BLOCK_FOR_INSN (m
->insn
)->frequency
* cost
;
195 /* This add all IDs of colored webs, which are connected to WEB by a move
196 to LIST and PROCESSED. */
199 spill_prop_insert (web
, list
, processed
)
201 sbitmap list
, processed
;
203 struct move_list
*ml
;
205 for (ml
= wl_moves
; ml
; ml
= ml
->next
)
206 if ((m
= ml
->move
) != NULL
)
208 struct web
*s
= alias (m
->source_web
);
209 struct web
*t
= alias (m
->target_web
);
216 if (s
!= web
|| t
->type
!= COLORED
|| TEST_BIT (processed
, t
->id
))
218 SET_BIT (list
, t
->id
);
219 SET_BIT (processed
, t
->id
);
223 /* The spill propagation pass. If we have to spilled webs, the first
224 connected through a move to a colored one, and the second also connected
225 to that colored one, and this colored web is only used to connect both
226 spilled webs, it might be worthwhile to spill that colored one.
227 This is the case, if the cost of the removed copy insns (all three webs
228 could be placed into the same stack slot) is higher than the spill cost
230 TO_PROP are the webs we try to propagate from (i.e. spilled ones),
231 SPILLED the set of all spilled webs so far and PROCESSED the set
232 of all webs processed so far, so we don't do work twice. */
235 spill_propagation (to_prop
, spilled
, processed
)
236 sbitmap to_prop
, spilled
, processed
;
240 sbitmap list
= sbitmap_alloc (num_webs
);
243 /* First insert colored move neighbors into the candidate list. */
244 EXECUTE_IF_SET_IN_SBITMAP (to_prop
, 0, id
,
246 spill_prop_insert (ID2WEB (id
), list
, processed
);
248 sbitmap_zero (to_prop
);
250 /* For all candidates, see, if the savings are higher than it's
252 while ((id
= sbitmap_first_set_bit (list
)) >= 0)
254 struct web
*web
= ID2WEB (id
);
255 RESET_BIT (list
, id
);
256 if (spill_prop_savings (web
, spilled
) >= web
->spill_cost
)
258 /* If so, we found a new spilled web. Insert it's colored
259 move neighbors again, and mark, that we need to repeat the
260 whole mainloop of spillprog/coalescing again. */
261 remove_web_from_list (web
);
263 put_web (web
, SPILLED
);
264 SET_BIT (spilled
, id
);
265 SET_BIT (to_prop
, id
);
266 spill_prop_insert (web
, list
, processed
);
274 /* The main phase to improve spill costs. This repeatedly runs
275 spill coalescing and spill propagation, until nothing changes. */
280 sbitmap spilled
, processed
, to_prop
;
283 spilled
= sbitmap_alloc (num_webs
);
284 processed
= sbitmap_alloc (num_webs
);
285 to_prop
= sbitmap_alloc (num_webs
);
286 sbitmap_zero (spilled
);
287 for (d
= WEBS(SPILLED
); d
; d
= d
->next
)
288 SET_BIT (spilled
, DLIST_WEB (d
)->id
);
289 sbitmap_copy (to_prop
, spilled
);
290 sbitmap_zero (processed
);
293 spill_coalescing (to_prop
, spilled
);
294 /* XXX Currently (with optimistic coalescing) spill_propagation()
295 doesn't give better code, sometimes it gives worse (but not by much)
296 code. I believe this is because of slightly wrong cost
297 measurements. Anyway right now it isn't worth the time it takes,
298 so deactivate it for now. */
299 again
= 0 && spill_propagation (to_prop
, spilled
, processed
);
302 sbitmap_free (to_prop
);
303 sbitmap_free (processed
);
304 sbitmap_free (spilled
);
307 /* Allocate a spill slot for a WEB. Currently we spill to pseudo
308 registers, to be able to track also webs for "stack slots", and also
309 to possibly colorize them. These pseudos are sometimes handled
310 in a special way, where we know, that they also can represent
314 allocate_spill_web (web
)
317 int regno
= web
->regno
;
321 slot
= gen_reg_rtx (PSEUDO_REGNO_MODE (regno
));
322 web
->stack_slot
= slot
;
325 /* This chooses a color for all SPILLED webs for interference region
326 spilling. The heuristic isn't good in any way. */
329 choose_spill_colors ()
332 unsigned HOST_WIDE_INT
*costs
= (unsigned HOST_WIDE_INT
*)
333 xmalloc (FIRST_PSEUDO_REGISTER
* sizeof (costs
[0]));
334 for (d
= WEBS(SPILLED
); d
; d
= d
->next
)
336 struct web
*web
= DLIST_WEB (d
);
337 struct conflict_link
*wl
;
340 memset (costs
, 0, FIRST_PSEUDO_REGISTER
* sizeof (costs
[0]));
341 for (wl
= web
->conflict_list
; wl
; wl
= wl
->next
)
343 struct web
*pweb
= wl
->t
;
344 if (pweb
->type
== COLORED
|| pweb
->type
== PRECOLORED
)
345 costs
[pweb
->color
] += pweb
->spill_cost
;
348 COPY_HARD_REG_SET (avail
, web
->usable_regs
);
349 if (web
->crosses_call
)
351 /* Add an arbitrary constant cost to colors not usable by
352 call-crossing webs without saves/loads. */
353 for (c
= 0; c
< FIRST_PSEUDO_REGISTER
; c
++)
354 if (TEST_HARD_REG_BIT (call_used_reg_set
, c
))
358 for (c
= 0; c
< FIRST_PSEUDO_REGISTER
; c
++)
359 if ((bestc
< 0 || costs
[bestc
] > costs
[c
])
360 && TEST_HARD_REG_BIT (avail
, c
)
361 && HARD_REGNO_MODE_OK (c
, PSEUDO_REGNO_MODE (web
->regno
)))
364 size
= HARD_REGNO_NREGS (c
, PSEUDO_REGNO_MODE (web
->regno
));
366 && TEST_HARD_REG_BIT (avail
, c
+ i
); i
++);
371 ra_debug_msg (DUMP_PROCESS
, "choosing color %d for spilled web %d\n",
378 /* For statistics sake we count the number and cost of all new loads,
379 stores and emitted rematerializations. */
/* Number of load insns emitted for spilled webs.  */
380 static unsigned int emitted_spill_loads
;
/* Number of store insns emitted for spilled webs.  */
381 static unsigned int emitted_spill_stores
;
/* Number of rematerializations emitted instead of loads.  */
382 static unsigned int emitted_remat
;
/* Cost of the emitted loads; each one adds bb->frequency + 1.  */
383 static unsigned HOST_WIDE_INT spill_load_cost
;
/* Cost of the emitted stores; each one adds bb->frequency + 1.  */
384 static unsigned HOST_WIDE_INT spill_store_cost
;
/* Cost of the emitted rematerializations; each adds bb->frequency + 1.  */
385 static unsigned HOST_WIDE_INT spill_remat_cost
;
387 /* In rewrite_program2() we detect if some def is useless, in the sense
388 that the pseudo set is not live anymore at that point.  The REF_IDs
389 of such defs are noted here. */
390 static bitmap useless_defs
;
392 /* This is the simple and fast version of rewriting the program to
393 include spill code. It spills at every insn containing spilled
394 defs or uses. Loads are added only if flag_ra_spill_every_use is
395 nonzero, otherwise only stores will be added. This doesn't
396 support rematerialization.
397 NEW_DEATHS is filled with uids for insns, which probably contain
401 rewrite_program (new_deaths
)
406 bitmap b
= BITMAP_XMALLOC ();
408 /* We walk over all webs, over all uses/defs. For all webs, we need
409 to look at spilled webs, and webs coalesced to spilled ones, in case
410 their alias isn't broken up, or they got spill coalesced. */
411 for (i
= 0; i
< 2; i
++)
412 for (d
= (i
== 0) ? WEBS(SPILLED
) : WEBS(COALESCED
); d
; d
= d
->next
)
414 struct web
*web
= DLIST_WEB (d
);
415 struct web
*aweb
= alias (web
);
419 /* Is trivially true for spilled webs, but not for coalesced ones. */
420 if (aweb
->type
!= SPILLED
)
423 /* First add loads before every use, if we have to. */
424 if (flag_ra_spill_every_use
)
427 allocate_spill_web (aweb
);
428 slot
= aweb
->stack_slot
;
429 for (j
= 0; j
< web
->num_uses
; j
++)
431 rtx insns
, target
, source
;
432 rtx insn
= DF_REF_INSN (web
->uses
[j
]);
433 rtx prev
= PREV_INSN (insn
);
434 basic_block bb
= BLOCK_FOR_INSN (insn
);
435 /* Happens when spill_coalescing() deletes move insns. */
439 /* Check that we didn't already added a load for this web
440 and insn. Happens, when the an insn uses the same web
442 if (bitmap_bit_p (b
, INSN_UID (insn
)))
444 bitmap_set_bit (b
, INSN_UID (insn
));
445 target
= DF_REF_REG (web
->uses
[j
]);
448 if (GET_CODE (target
) == SUBREG
)
449 source
= simplify_gen_subreg (GET_MODE (target
), source
,
451 SUBREG_BYTE (target
));
452 ra_emit_move_insn (target
, source
);
453 insns
= get_insns ();
455 emit_insn_before (insns
, insn
);
457 if (bb
->head
== insn
)
458 bb
->head
= NEXT_INSN (prev
);
459 for (insn
= PREV_INSN (insn
); insn
!= prev
;
460 insn
= PREV_INSN (insn
))
462 set_block_for_insn (insn
, bb
);
463 df_insn_modify (df
, bb
, insn
);
466 emitted_spill_loads
++;
467 spill_load_cost
+= bb
->frequency
+ 1;
471 /* Now emit the stores after each def.
472 If any uses were loaded from stackslots (compared to
473 rematerialized or not reloaded due to IR spilling),
474 aweb->stack_slot will be set. If not, we don't need to emit
476 slot
= aweb
->stack_slot
;
479 for (j
= 0; j
< web
->num_defs
; j
++)
481 rtx insns
, source
, dest
;
482 rtx insn
= DF_REF_INSN (web
->defs
[j
]);
483 rtx following
= NEXT_INSN (insn
);
484 basic_block bb
= BLOCK_FOR_INSN (insn
);
485 /* Happens when spill_coalescing() deletes move insns. */
488 if (bitmap_bit_p (b
, INSN_UID (insn
)))
490 bitmap_set_bit (b
, INSN_UID (insn
));
492 source
= DF_REF_REG (web
->defs
[j
]);
494 if (GET_CODE (source
) == SUBREG
)
495 dest
= simplify_gen_subreg (GET_MODE (source
), dest
,
497 SUBREG_BYTE (source
));
498 ra_emit_move_insn (dest
, source
);
500 insns
= get_insns ();
504 emit_insn_after (insns
, insn
);
506 bb
->end
= PREV_INSN (following
);
507 for (insn
= insns
; insn
!= following
; insn
= NEXT_INSN (insn
))
509 set_block_for_insn (insn
, bb
);
510 df_insn_modify (df
, bb
, insn
);
514 df_insn_modify (df
, bb
, insn
);
515 emitted_spill_stores
++;
516 spill_store_cost
+= bb
->frequency
+ 1;
517 /* XXX we should set new_deaths for all inserted stores
518 whose pseudo dies here.
519 Note, that this isn't the case for _all_ stores. */
520 /* I.e. the next is wrong, and might cause some spilltemps
521 to be categorized as spilltemp2's (i.e. live over a death),
522 although they aren't. This might make them spill again,
523 which causes endlessness in the case, this insn is in fact
525 bitmap_set_bit (new_deaths
, INSN_UID (PREV_INSN (following
)));
532 /* A simple list of rtx's. */
535 struct rtx_list
*next
;
539 /* Adds X to *LIST. */
542 remember_slot (list
, x
)
543 struct rtx_list
**list
;
547 /* PRE: X is not already in LIST. */
548 l
= (struct rtx_list
*) ra_alloc (sizeof (*l
));
554 /* Given two rtx' S1 and S2, either being REGs or MEMs (or SUBREGs
555 thereof), return nonzero, if they overlap. REGs and MEMs don't
556 overlap, and if they are MEMs they must have an easy address
557 (plus (basereg) (const_inst x)), otherwise they overlap. */
560 slots_overlap_p (s1
, s2
)
564 HOST_WIDE_INT ofs1
= 0, ofs2
= 0;
565 int size1
= GET_MODE_SIZE (GET_MODE (s1
));
566 int size2
= GET_MODE_SIZE (GET_MODE (s2
));
567 if (GET_CODE (s1
) == SUBREG
)
568 ofs1
= SUBREG_BYTE (s1
), s1
= SUBREG_REG (s1
);
569 if (GET_CODE (s2
) == SUBREG
)
570 ofs2
= SUBREG_BYTE (s2
), s2
= SUBREG_REG (s2
);
575 if (GET_CODE (s1
) != GET_CODE (s2
))
578 if (GET_CODE (s1
) == REG
&& GET_CODE (s2
) == REG
)
580 if (REGNO (s1
) != REGNO (s2
))
582 if (ofs1
>= ofs2
+ size2
|| ofs2
>= ofs1
+ size1
)
586 if (GET_CODE (s1
) != MEM
|| GET_CODE (s2
) != MEM
)
590 if (GET_CODE (s1
) != PLUS
|| GET_CODE (XEXP (s1
, 0)) != REG
591 || GET_CODE (XEXP (s1
, 1)) != CONST_INT
)
593 if (GET_CODE (s2
) != PLUS
|| GET_CODE (XEXP (s2
, 0)) != REG
594 || GET_CODE (XEXP (s2
, 1)) != CONST_INT
)
596 base1
= XEXP (s1
, 0);
597 base2
= XEXP (s2
, 0);
598 if (!rtx_equal_p (base1
, base2
))
600 ofs1
+= INTVAL (XEXP (s1
, 1));
601 ofs2
+= INTVAL (XEXP (s2
, 1));
602 if (ofs1
>= ofs2
+ size2
|| ofs2
>= ofs1
+ size1
)
607 /* This deletes from *LIST all rtx's which overlap with X in the sense
608 of slots_overlap_p(). */
611 delete_overlapping_slots (list
, x
)
612 struct rtx_list
**list
;
617 if (slots_overlap_p ((*list
)->x
, x
))
618 *list
= (*list
)->next
;
620 list
= &((*list
)->next
);
624 /* Returns nonzero, of X is member of LIST. */
627 slot_member_p (list
, x
)
628 struct rtx_list
*list
;
631 for (;list
; list
= list
->next
)
632 if (rtx_equal_p (list
->x
, x
))
637 /* A more sophisticated (and slower) method of adding the stores, than
638 rewrite_program(). This goes backward the insn stream, adding
639 stores as it goes, but only if it hasn't just added a store to the
640 same location. NEW_DEATHS is a bitmap filled with uids of insns
641 containing deaths. */
644 insert_stores (new_deaths
)
648 rtx last_slot
= NULL_RTX
;
649 struct rtx_list
*slots
= NULL
;
651 /* We go simply backwards over basic block borders. */
652 for (insn
= get_last_insn (); insn
; insn
= PREV_INSN (insn
))
654 int uid
= INSN_UID (insn
);
656 /* If we reach a basic block border, which has more than one
657 outgoing edge, we simply forget all already emitted stores. */
658 if (GET_CODE (insn
) == BARRIER
659 || JUMP_P (insn
) || can_throw_internal (insn
))
661 last_slot
= NULL_RTX
;
667 /* If this insn was not just added in this pass. */
668 if (uid
< insn_df_max_uid
)
671 rtx following
= NEXT_INSN (insn
);
672 basic_block bb
= BLOCK_FOR_INSN (insn
);
673 struct ra_insn_info info
;
676 for (n
= 0; n
< info
.num_defs
; n
++)
678 struct web
*web
= def2web
[DF_REF_ID (info
.defs
[n
])];
679 struct web
*aweb
= alias (find_web_for_subweb (web
));
681 if (aweb
->type
!= SPILLED
|| !aweb
->stack_slot
)
683 slot
= aweb
->stack_slot
;
684 source
= DF_REF_REG (info
.defs
[n
]);
685 /* adjust_address() might generate code. */
687 if (GET_CODE (source
) == SUBREG
)
688 slot
= simplify_gen_subreg (GET_MODE (source
), slot
,
690 SUBREG_BYTE (source
));
691 /* If we have no info about emitted stores, or it didn't
692 contain the location we intend to use soon, then
694 if ((!last_slot
|| !rtx_equal_p (slot
, last_slot
))
695 && ! slot_member_p (slots
, slot
))
699 remember_slot (&slots
, slot
);
700 ra_emit_move_insn (slot
, source
);
701 insns
= get_insns ();
705 emit_insn_after (insns
, insn
);
707 bb
->end
= PREV_INSN (following
);
708 for (ni
= insns
; ni
!= following
; ni
= NEXT_INSN (ni
))
710 set_block_for_insn (ni
, bb
);
711 df_insn_modify (df
, bb
, ni
);
715 df_insn_modify (df
, bb
, insn
);
716 emitted_spill_stores
++;
717 spill_store_cost
+= bb
->frequency
+ 1;
718 bitmap_set_bit (new_deaths
, INSN_UID (PREV_INSN (following
)));
722 /* Otherwise ignore insns from adjust_address() above. */
727 /* If we look at a load generated by the allocator, forget
728 the last emitted slot, and additionally clear all slots
729 overlapping it's source (after all, we need it again). */
730 /* XXX If we emit the stack-ref directly into the using insn the
731 following needs a change, because that is no new insn. Preferably
732 we would add some notes to the insn, what stackslots are needed
734 if (uid
>= last_max_uid
)
736 rtx set
= single_set (insn
);
737 last_slot
= NULL_RTX
;
738 /* If this was no simple set, give up, and forget everything. */
743 if (1 || GET_CODE (SET_SRC (set
)) == MEM
)
744 delete_overlapping_slots (&slots
, SET_SRC (set
));
750 /* Returns 1 if both colored webs have some hardregs in common, even if
751 they are not the same width. */
754 spill_same_color_p (web1
, web2
)
755 struct web
*web1
, *web2
;
757 int c1
, size1
, c2
, size2
;
758 if ((c1
= alias (web1
)->color
) < 0 || c1
== an_unusable_color
)
760 if ((c2
= alias (web2
)->color
) < 0 || c2
== an_unusable_color
)
763 size1
= web1
->type
== PRECOLORED
764 ? 1 : HARD_REGNO_NREGS (c1
, PSEUDO_REGNO_MODE (web1
->regno
));
765 size2
= web2
->type
== PRECOLORED
766 ? 1 : HARD_REGNO_NREGS (c2
, PSEUDO_REGNO_MODE (web2
->regno
));
767 if (c1
>= c2
+ size2
|| c2
>= c1
+ size1
)
772 /* Given the set of live web IDs LIVE, returns nonzero, if any of WEBs
773 subwebs (or WEB itself) is live. */
776 is_partly_live_1 (live
, web
)
781 if (TEST_BIT (live
, web
->id
))
783 while ((web
= web
->subreg_next
));
787 /* Fast version in case WEB has no subwebs. */
/* Nonzero if WEB (or, when it has subwebs, any of them) is marked live
   in the sbitmap LIVE.  The common no-subweb case is a single bit test;
   otherwise the out-of-line walker is_partly_live_1() scans the
   subreg_next chain.  */
788 #define is_partly_live(live, web) ((!web->subreg_next) \
789 ? TEST_BIT (live, web->id) \
790 : is_partly_live_1 (live, web))
792 /* Change the set of currently IN_USE colors according to
793 WEB's color. Either add those colors to the hardreg set (if ADD
794 is nonzero), or remove them. */
797 update_spill_colors (in_use
, web
, add
)
798 HARD_REG_SET
*in_use
;
803 if ((c
= alias (find_web_for_subweb (web
))->color
) < 0
804 || c
== an_unusable_color
)
806 size
= HARD_REGNO_NREGS (c
, GET_MODE (web
->orig_x
));
809 c
+= subreg_regno_offset (c
, GET_MODE (SUBREG_REG (web
->orig_x
)),
810 SUBREG_BYTE (web
->orig_x
),
811 GET_MODE (web
->orig_x
));
813 else if (web
->type
== PRECOLORED
)
817 SET_HARD_REG_BIT (*in_use
, c
+ size
);
820 CLEAR_HARD_REG_BIT (*in_use
, c
+ size
);
823 /* Given a set of hardregs currently IN_USE and the color C of WEB,
824 return -1 if WEB has no color, 1 of it has the unusable color,
825 0 if one of it's used hardregs are in use, and 1 otherwise.
826 Generally, if WEB can't be left colorized return 1. */
829 spill_is_free (in_use
, web
)
830 HARD_REG_SET
*in_use
;
834 if ((c
= alias (web
)->color
) < 0)
836 if (c
== an_unusable_color
)
838 size
= web
->type
== PRECOLORED
839 ? 1 : HARD_REGNO_NREGS (c
, PSEUDO_REGNO_MODE (web
->regno
));
841 if (TEST_HARD_REG_BIT (*in_use
, c
+ size
))
847 /* Structure for passing between rewrite_program2() and emit_loads(). */
850 /* The web IDs which currently would need a reload. These are
851 currently live spilled webs, whose color was still free. */
853 /* We need a scratch bitmap, but don't want to allocate one a zillion
856 /* Web IDs of currently live webs. This are the precise IDs,
857 not just those of the superwebs. If only on part is live, only
858 that ID is placed here. */
860 /* An array of webs, which currently need a load added.
861 They will be emitted when seeing the first death. */
862 struct web
**needed_loads
;
863 /* The current number of entries in needed_loads. */
865 /* The number of bits set in need_reload. */
867 /* The current set of hardregs not available. */
868 HARD_REG_SET colors_in_use
;
869 /* Nonzero, if we just added some spill temps to need_reload or
870 needed_loads. In this case we don't wait for the next death
871 to emit their loads. */
872 int any_spilltemps_spilled
;
873 /* Nonzero, if we currently need to emit the loads. E.g. when we
874 saw an insn containing deaths. */
878 /* The needed_loads list of RI contains some webs for which
879 we add the actual load insns here. They are added just before
880 their use last seen. NL_FIRST_RELOAD is the index of the first
881 load which is a converted reload, all other entries are normal
882 loads. LAST_BLOCK_INSN is the last insn of the current basic block. */
885 emit_loads (ri
, nl_first_reload
, last_block_insn
)
886 struct rewrite_info
*ri
;
891 for (j
= ri
->nl_size
; j
;)
893 struct web
*web
= ri
->needed_loads
[--j
];
897 rtx before
= NULL_RTX
, after
= NULL_RTX
;
899 /* When spilltemps were spilled for the last insns, their
900 loads already are emitted, which is noted by setting
901 needed_loads[] for it to 0. */
904 supweb
= find_web_for_subweb (web
);
905 if (supweb
->regno
>= max_normal_pseudo
)
907 /* Check for web being a spilltemp, if we only want to
908 load spilltemps. Also remember, that we emitted that
909 load, which we don't need to do when we have a death,
910 because then all of needed_loads[] is emptied. */
913 if (!supweb
->spill_temp
)
916 ri
->needed_loads
[j
] = 0;
919 /* The adding of reloads doesn't depend on liveness. */
920 if (j
< nl_first_reload
&& !TEST_BIT (ri
->live
, web
->id
))
922 aweb
= alias (supweb
);
927 /* XXX If we later allow non-constant sources for rematerialization
928 we must also disallow coalescing _to_ rematerialized webs
929 (at least then disallow spilling them, which we already ensure
930 when flag_ra_break_aliases), or not take the pattern but a
934 slot
= copy_rtx (supweb
->pattern
);
935 reg
= copy_rtx (supweb
->orig_x
);
936 /* Sanity check. orig_x should be a REG rtx, which should be
937 shared over all RTL, so copy_rtx should have no effect. */
938 if (reg
!= supweb
->orig_x
)
943 allocate_spill_web (aweb
);
944 slot
= aweb
->stack_slot
;
946 /* If we don't copy the RTL there might be some SUBREG
947 rtx shared in the next iteration although being in
948 different webs, which leads to wrong code. */
949 reg
= copy_rtx (web
->orig_x
);
950 if (GET_CODE (reg
) == SUBREG
)
951 /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
953 slot
= simplify_gen_subreg (GET_MODE (reg
), slot
, GET_MODE (slot
),
956 ra_emit_move_insn (reg
, slot
);
959 before
= web
->last_use_insn
;
960 web
->last_use_insn
= NULL_RTX
;
963 if (JUMP_P (last_block_insn
))
964 before
= last_block_insn
;
966 after
= last_block_insn
;
970 rtx foll
= NEXT_INSN (after
);
971 bb
= BLOCK_FOR_INSN (after
);
972 emit_insn_after (ni
, after
);
973 if (bb
->end
== after
)
974 bb
->end
= PREV_INSN (foll
);
975 for (ni
= NEXT_INSN (after
); ni
!= foll
; ni
= NEXT_INSN (ni
))
977 set_block_for_insn (ni
, bb
);
978 df_insn_modify (df
, bb
, ni
);
983 rtx prev
= PREV_INSN (before
);
984 bb
= BLOCK_FOR_INSN (before
);
985 emit_insn_before (ni
, before
);
986 if (bb
->head
== before
)
987 bb
->head
= NEXT_INSN (prev
);
988 for (; ni
!= before
; ni
= NEXT_INSN (ni
))
990 set_block_for_insn (ni
, bb
);
991 df_insn_modify (df
, bb
, ni
);
997 spill_remat_cost
+= bb
->frequency
+ 1;
1001 emitted_spill_loads
++;
1002 spill_load_cost
+= bb
->frequency
+ 1;
1004 RESET_BIT (ri
->live
, web
->id
);
1005 /* In the special case documented above only emit the reloads and
1007 if (ri
->need_load
== 2 && j
< nl_first_reload
)
1014 /* Given a set of reloads in RI, an array of NUM_REFS references (either
1015 uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
1016 (either use2web or def2web) convert some reloads to loads.
1017 This looks at the webs referenced, and how they change the set of
1018 available colors. Now put all still live webs, which needed reloads,
1019 and whose colors isn't free anymore, on the needed_loads list. */
1022 reloads_to_loads (ri
, refs
, num_refs
, ref2web
)
1023 struct rewrite_info
*ri
;
1025 unsigned int num_refs
;
1026 struct web
**ref2web
;
1029 int num_reloads
= ri
->num_reloads
;
1030 for (n
= 0; n
< num_refs
&& num_reloads
; n
++)
1032 struct web
*web
= ref2web
[DF_REF_ID (refs
[n
])];
1033 struct web
*supweb
= find_web_for_subweb (web
);
1036 /* Only emit reloads when entering their interference
1037 region. A use of a spilled web never opens an
1038 interference region, independent of it's color. */
1039 if (alias (supweb
)->type
== SPILLED
)
1041 if (supweb
->type
== PRECOLORED
1042 && TEST_HARD_REG_BIT (never_use_colors
, supweb
->color
))
1044 /* Note, that if web (and supweb) are DEFs, we already cleared
1045 the corresponding bits in live. I.e. is_death becomes true, which
1047 is_death
= !TEST_BIT (ri
->live
, supweb
->id
);
1048 is_death
&= !TEST_BIT (ri
->live
, web
->id
);
1051 int old_num_r
= num_reloads
;
1052 bitmap_clear (ri
->scratch
);
1053 EXECUTE_IF_SET_IN_BITMAP (ri
->need_reload
, 0, j
,
1055 struct web
*web2
= ID2WEB (j
);
1056 struct web
*aweb2
= alias (find_web_for_subweb (web2
));
1057 if (spill_is_free (&(ri
->colors_in_use
), aweb2
) == 0)
1059 if (spill_same_color_p (supweb
, aweb2
)
1060 /* && interfere (web, web2) */)
1064 ri
->needed_loads
[ri
->nl_size
++] = web2
;
1067 bitmap_set_bit (ri
->scratch
, j
);
1071 if (num_reloads
!= old_num_r
)
1072 bitmap_operation (ri
->need_reload
, ri
->need_reload
, ri
->scratch
,
1076 ri
->num_reloads
= num_reloads
;
1079 /* This adds loads for spilled webs to the program. It uses a kind of
1080 interference region spilling. If flag_ra_ir_spilling is zero it
1081 only uses improved chaitin spilling (adding loads only at insns
1082 containing deaths). */
1085 rewrite_program2 (new_deaths
)
1088 basic_block bb
= NULL
;
1089 int nl_first_reload
;
1090 struct rewrite_info ri
;
1092 ri
.needed_loads
= (struct web
**) xmalloc (num_webs
* sizeof (struct web
*));
1093 ri
.need_reload
= BITMAP_XMALLOC ();
1094 ri
.scratch
= BITMAP_XMALLOC ();
1095 ri
.live
= sbitmap_alloc (num_webs
);
1098 for (insn
= get_last_insn (); insn
; insn
= PREV_INSN (insn
))
1100 basic_block last_bb
= NULL
;
1101 rtx last_block_insn
;
1104 insn
= prev_real_insn (insn
);
1105 while (insn
&& !(bb
= BLOCK_FOR_INSN (insn
)))
1106 insn
= prev_real_insn (insn
);
1110 last_block_insn
= insn
;
1112 sbitmap_zero (ri
.live
);
1113 CLEAR_HARD_REG_SET (ri
.colors_in_use
);
1114 EXECUTE_IF_SET_IN_BITMAP (live_at_end
[i
- 2], 0, j
,
1116 struct web
*web
= use2web
[j
];
1117 struct web
*aweb
= alias (find_web_for_subweb (web
));
1118 /* A web is only live at end, if it isn't spilled. If we wouldn't
1119 check this, the last uses of spilled web per basic block
1120 wouldn't be detected as deaths, although they are in the final
1121 code. This would lead to cumulating many loads without need,
1122 only increasing register pressure. */
1123 /* XXX do add also spilled webs which got a color for IR spilling.
1124 Remember to not add to colors_in_use in that case. */
1125 if (aweb
->type
!= SPILLED
/*|| aweb->color >= 0*/)
1127 SET_BIT (ri
.live
, web
->id
);
1128 if (aweb
->type
!= SPILLED
)
1129 update_spill_colors (&(ri
.colors_in_use
), web
, 1);
1133 bitmap_clear (ri
.need_reload
);
1135 ri
.any_spilltemps_spilled
= 0;
1136 if (flag_ra_ir_spilling
)
1140 /* XXX If we don't add spilled nodes into live above, the following
1141 becomes an empty loop. */
1142 for (pass
= 0; pass
< 2; pass
++)
1143 for (d
= (pass
) ? WEBS(SPILLED
) : WEBS(COALESCED
); d
; d
= d
->next
)
1145 struct web
*web
= DLIST_WEB (d
);
1146 struct web
*aweb
= alias (web
);
1147 if (aweb
->type
!= SPILLED
)
1149 if (is_partly_live (ri
.live
, web
)
1150 && spill_is_free (&(ri
.colors_in_use
), web
) > 0)
1153 bitmap_set_bit (ri
.need_reload
, web
->id
);
1154 /* Last using insn is somewhere in another block. */
1155 web
->last_use_insn
= NULL_RTX
;
1161 for (; insn
; insn
= PREV_INSN (insn
))
1163 struct ra_insn_info info
;
1166 if (INSN_P (insn
) && BLOCK_FOR_INSN (insn
) != last_bb
)
1168 int index
= BLOCK_FOR_INSN (insn
)->index
+ 2;
1169 EXECUTE_IF_SET_IN_BITMAP (live_at_end
[index
- 2], 0, j
,
1171 struct web
*web
= use2web
[j
];
1172 struct web
*aweb
= alias (find_web_for_subweb (web
));
1173 if (aweb
->type
!= SPILLED
)
1175 SET_BIT (ri
.live
, web
->id
);
1176 update_spill_colors (&(ri
.colors_in_use
), web
, 1);
1179 bitmap_clear (ri
.scratch
);
1180 EXECUTE_IF_SET_IN_BITMAP (ri
.need_reload
, 0, j
,
1182 struct web
*web2
= ID2WEB (j
);
1183 struct web
*supweb2
= find_web_for_subweb (web2
);
1184 struct web
*aweb2
= alias (supweb2
);
1185 if (spill_is_free (&(ri
.colors_in_use
), aweb2
) <= 0)
1189 ri
.needed_loads
[ri
.nl_size
++] = web2
;
1192 bitmap_set_bit (ri
.scratch
, j
);
1196 bitmap_operation (ri
.need_reload
, ri
.need_reload
, ri
.scratch
,
1198 last_bb
= BLOCK_FOR_INSN (insn
);
1199 last_block_insn
= insn
;
1200 if (!INSN_P (last_block_insn
))
1201 last_block_insn
= prev_real_insn (last_block_insn
);
1206 info
= insn_df
[INSN_UID (insn
)];
1209 for (n
= 0; n
< info
.num_defs
; n
++)
1211 struct ref
*ref
= info
.defs
[n
];
1212 struct web
*web
= def2web
[DF_REF_ID (ref
)];
1213 struct web
*supweb
= find_web_for_subweb (web
);
1217 supweb
= find_web_for_subweb (web
);
1218 /* Webs which are defined here, but also used in the same insn
1219 are rmw webs, or this use isn't a death because of looping
1220 constructs. In neither case makes this def available it's
1221 resources. Reloads for it are still needed, it's still
1222 live and it's colors don't become free. */
1223 for (n2
= 0; n2
< info
.num_uses
; n2
++)
1225 struct web
*web2
= use2web
[DF_REF_ID (info
.uses
[n2
])];
1226 if (supweb
== find_web_for_subweb (web2
))
1235 if (!is_partly_live (ri
.live
, supweb
))
1236 bitmap_set_bit (useless_defs
, DF_REF_ID (ref
));
1238 RESET_BIT (ri
.live
, web
->id
);
1239 if (bitmap_bit_p (ri
.need_reload
, web
->id
))
1242 bitmap_clear_bit (ri
.need_reload
, web
->id
);
1246 /* XXX subwebs aren't precisely tracked here. We have
1247 everything we need (inverse webs), but the code isn't
1248 yet written. We need to make all completely
1249 overlapping web parts non-live here. */
1250 /* If by luck now the whole web isn't live anymore, no
1251 reloads for it are needed. */
1252 if (!is_partly_live (ri
.live
, supweb
)
1253 && bitmap_bit_p (ri
.need_reload
, supweb
->id
))
1256 bitmap_clear_bit (ri
.need_reload
, supweb
->id
);
1262 /* If the whole web is defined here, no parts of it are
1263 live anymore and no reloads are needed for them. */
1264 for (sweb
= supweb
->subreg_next
; sweb
;
1265 sweb
= sweb
->subreg_next
)
1267 RESET_BIT (ri
.live
, sweb
->id
);
1268 if (bitmap_bit_p (ri
.need_reload
, sweb
->id
))
1271 bitmap_clear_bit (ri
.need_reload
, sweb
->id
);
1275 if (alias (supweb
)->type
!= SPILLED
)
1276 update_spill_colors (&(ri
.colors_in_use
), web
, 0);
1279 nl_first_reload
= ri
.nl_size
;
1281 /* CALL_INSNs are not really deaths, but still more registers
1282 are free after a call, than before.
1283 XXX Note, that sometimes reload barfs when we emit insns between
1284 a call and the insn which copies the return register into a
1286 if (GET_CODE (insn
) == CALL_INSN
)
1288 else if (INSN_P (insn
))
1289 for (n
= 0; n
< info
.num_uses
; n
++)
1291 struct web
*web
= use2web
[DF_REF_ID (info
.uses
[n
])];
1292 struct web
*supweb
= find_web_for_subweb (web
);
1294 if (supweb
->type
== PRECOLORED
1295 && TEST_HARD_REG_BIT (never_use_colors
, supweb
->color
))
1297 is_death
= !TEST_BIT (ri
.live
, supweb
->id
);
1298 is_death
&= !TEST_BIT (ri
.live
, web
->id
);
1302 bitmap_set_bit (new_deaths
, INSN_UID (insn
));
1307 if (INSN_P (insn
) && ri
.num_reloads
)
1309 int old_num_reloads
= ri
.num_reloads
;
1310 reloads_to_loads (&ri
, info
.uses
, info
.num_uses
, use2web
);
1312 /* If this insn sets a pseudo, which isn't used later
1313 (i.e. wasn't live before) it is a dead store. We need
1314 to emit all reloads which have the same color as this def.
1315 We don't need to check for non-liveness here to detect
1316 the deadness (it anyway is too late, as we already cleared
1317 the liveness in the first loop over the defs), because if it
1318 _would_ be live here, no reload could have that color, as
1319 they would already have been converted to a load. */
1321 reloads_to_loads (&ri
, info
.defs
, info
.num_defs
, def2web
);
1322 if (ri
.num_reloads
!= old_num_reloads
&& !ri
.need_load
)
1326 if (ri
.nl_size
&& (ri
.need_load
|| ri
.any_spilltemps_spilled
))
1327 emit_loads (&ri
, nl_first_reload
, last_block_insn
);
1329 if (INSN_P (insn
) && flag_ra_ir_spilling
)
1330 for (n
= 0; n
< info
.num_uses
; n
++)
1332 struct web
*web
= use2web
[DF_REF_ID (info
.uses
[n
])];
1333 struct web
*aweb
= alias (find_web_for_subweb (web
));
1334 if (aweb
->type
!= SPILLED
)
1335 update_spill_colors (&(ri
.colors_in_use
), web
, 1);
1338 ri
.any_spilltemps_spilled
= 0;
1340 for (n
= 0; n
< info
.num_uses
; n
++)
1342 struct web
*web
= use2web
[DF_REF_ID (info
.uses
[n
])];
1343 struct web
*supweb
= find_web_for_subweb (web
);
1344 struct web
*aweb
= alias (supweb
);
1345 SET_BIT (ri
.live
, web
->id
);
1346 if (aweb
->type
!= SPILLED
)
1348 if (supweb
->spill_temp
)
1349 ri
.any_spilltemps_spilled
= 1;
1350 web
->last_use_insn
= insn
;
1353 if (spill_is_free (&(ri
.colors_in_use
), aweb
) <= 0
1354 || !flag_ra_ir_spilling
)
1356 ri
.needed_loads
[ri
.nl_size
++] = web
;
1360 else if (!bitmap_bit_p (ri
.need_reload
, web
->id
))
1362 bitmap_set_bit (ri
.need_reload
, web
->id
);
1373 if (GET_CODE (insn
) == CODE_LABEL
)
1377 nl_first_reload
= ri
.nl_size
;
1383 HARD_REG_SET cum_colors
, colors
;
1384 CLEAR_HARD_REG_SET (cum_colors
);
1385 for (e
= bb
->pred
; e
&& num
< 5; e
= e
->pred_next
, num
++)
1388 CLEAR_HARD_REG_SET (colors
);
1389 EXECUTE_IF_SET_IN_BITMAP (live_at_end
[e
->src
->index
], 0, j
,
1391 struct web
*web
= use2web
[j
];
1392 struct web
*aweb
= alias (find_web_for_subweb (web
));
1393 if (aweb
->type
!= SPILLED
)
1394 update_spill_colors (&colors
, web
, 1);
1396 IOR_HARD_REG_SET (cum_colors
, colors
);
1401 bitmap_clear (ri
.scratch
);
1402 EXECUTE_IF_SET_IN_BITMAP (ri
.need_reload
, 0, j
,
1404 struct web
*web2
= ID2WEB (j
);
1405 struct web
*supweb2
= find_web_for_subweb (web2
);
1406 struct web
*aweb2
= alias (supweb2
);
1407 /* block entry is IR boundary for aweb2?
1408 Currently more some tries for good conditions. */
1409 if (((ra_pass
> 0 || supweb2
->target_of_spilled_move
)
1410 && (1 || in_ir
|| spill_is_free (&cum_colors
, aweb2
) <= 0))
1413 || spill_is_free (&cum_colors
, aweb2
) <= 0)))
1417 ri
.needed_loads
[ri
.nl_size
++] = web2
;
1420 bitmap_set_bit (ri
.scratch
, j
);
1424 bitmap_operation (ri
.need_reload
, ri
.need_reload
, ri
.scratch
,
1429 emit_loads (&ri
, nl_first_reload
, last_block_insn
);
1430 if (ri
.nl_size
!= 0 /*|| ri.num_reloads != 0*/)
1435 free (ri
.needed_loads
);
1436 sbitmap_free (ri
.live
);
1437 BITMAP_XFREE (ri
.scratch
);
1438 BITMAP_XFREE (ri
.need_reload
);
/* WEB is a web conflicting with a spilled one.  Prepare it
   to be able to rescan it in the next pass.  Mark all its uses
   for checking, and clear some members of their web parts
   (of defs and uses).  Notably don't clear the uplink.  We don't
   change the layout of this web, just its conflicts.
   Also remember all IDs of its uses in USES_AS_BITMAP.  */
/* NOTE(review): reconstructed from a garbled extraction; lines that held
   only braces or declarations were restored — verify against upstream.  */
static void
mark_refs_for_checking (web, uses_as_bitmap)
     struct web *web;
     bitmap uses_as_bitmap;
{
  unsigned int i;
  /* All uses of WEB: mark them for rechecking, record their IDs in
     USES_AS_BITMAP, and reset the spill-related state of their web parts.  */
  for (i = 0; i < web->num_uses; i++)
    {
      unsigned int id = DF_REF_ID (web->uses[i]);
      SET_BIT (last_check_uses, id);
      bitmap_set_bit (uses_as_bitmap, id);
      /* Use parts are indexed after all def parts, hence df->def_id + id.  */
      web_parts[df->def_id + id].spanned_deaths = 0;
      web_parts[df->def_id + id].crosses_call = 0;
    }
  /* Defs only get their per-part state reset; they need no recheck mark.  */
  for (i = 0; i < web->num_defs; i++)
    {
      unsigned int id = DF_REF_ID (web->defs[i]);
      web_parts[id].spanned_deaths = 0;
      web_parts[id].crosses_call = 0;
    }
}
/* The last step of the spill phase is to set up the structures for
   incrementally rebuilding the interference graph.  We break up
   the web part structure of all spilled webs, mark their uses for
   rechecking, look at their neighbors, and clean up some global
   information that we will rebuild.  */
/* NOTE(review): reconstructed from a garbled extraction; lines that held
   only braces, `else`, `continue` or declarations were restored — verify
   against upstream.  */
static void
detect_web_parts_to_rebuild ()
{
  bitmap uses_as_bitmap;
  unsigned int i, pass;
  struct dlist *d;
  sbitmap already_webs = sbitmap_alloc (num_webs);

  uses_as_bitmap = BITMAP_XMALLOC ();
  /* Reallocate last_check_uses: the number of uses (df->use_id) may have
     changed since the previous round.  */
  if (last_check_uses)
    sbitmap_free (last_check_uses);
  last_check_uses = sbitmap_alloc (df->use_id);
  sbitmap_zero (last_check_uses);
  sbitmap_zero (already_webs);
  /* We need to recheck all uses of all webs involved in spilling (and the
     uses added by spill insns, but those are not analyzed yet).
     Those are the spilled webs themselves, webs coalesced to spilled ones,
     and webs conflicting with any of them.  */
  for (pass = 0; pass < 2; pass++)
    for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
      {
	struct web *web = DLIST_WEB (d);
	struct conflict_link *wl;
	unsigned int j;
	/* This check is only needed for coalesced nodes, but hey.  */
	if (alias (web)->type != SPILLED)
	  continue;

	/* For the spilled web itself we also need to clear its
	   uplink, to be able to rebuild smaller webs.  After all
	   spilling has split the web.  */
	for (i = 0; i < web->num_uses; i++)
	  {
	    unsigned int id = DF_REF_ID (web->uses[i]);
	    SET_BIT (last_check_uses, id);
	    bitmap_set_bit (uses_as_bitmap, id);
	    web_parts[df->def_id + id].uplink = NULL;
	    web_parts[df->def_id + id].spanned_deaths = 0;
	    web_parts[df->def_id + id].crosses_call = 0;
	  }
	for (i = 0; i < web->num_defs; i++)
	  {
	    unsigned int id = DF_REF_ID (web->defs[i]);
	    web_parts[id].uplink = NULL;
	    web_parts[id].spanned_deaths = 0;
	    web_parts[id].crosses_call = 0;
	  }

	/* Now look at all neighbors of this spilled web.  */
	if (web->have_orig_conflicts)
	  wl = web->orig_conflict_list;
	else
	  wl = web->conflict_list;
	for (; wl; wl = wl->next)
	  {
	    /* Visit each neighbor web only once.  */
	    if (TEST_BIT (already_webs, wl->t->id))
	      continue;
	    SET_BIT (already_webs, wl->t->id);
	    mark_refs_for_checking (wl->t, uses_as_bitmap);
	  }
	EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
	  {
	    struct web *web2 = ID2WEB (j);
	    if (TEST_BIT (already_webs, web2->id))
	      continue;
	    SET_BIT (already_webs, web2->id);
	    mark_refs_for_checking (web2, uses_as_bitmap);
	  });
      }

  /* We also recheck unconditionally all uses of any hardregs.  This means
     we _can_ delete all these uses from the live_at_end[] bitmaps.
     And because we sometimes delete insns referring to hardregs (when
     they became useless because they setup a rematerializable pseudo, which
     then was rematerialized), some of those uses will go away with the next
     df_analyse().  This means we even _must_ delete those uses from
     the live_at_end[] bitmaps.  For simplicity we simply delete
     all of them.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      struct df_link *link;
      for (link = df->regs[i].uses; link; link = link->next)
	if (link->ref)
	  bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
    }

  /* The information in live_at_end[] will be rebuilt for all uses
     we recheck, so clear it here (the uses of spilled webs might
     indeed not become member of it again).  live_at_end is biased by
     two entries (code above indexes it with `index - 2`), hence the
     pointer adjustment around the loop.  */
  live_at_end -= 2;
  for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
    bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap,
		      BITMAP_AND_COMPL);
  live_at_end += 2;

  if (rtl_dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
    {
      ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
      dump_sbitmap_file (rtl_dump_file, last_check_uses);
    }
  sbitmap_free (already_webs);
  BITMAP_XFREE (uses_as_bitmap);
}
/* Statistics about deleted insns, which are useless now.  */
/* Number of useless-def insns removed by delete_useless_defs().  */
static unsigned int deleted_def_insns;
/* Summed block-frequency-weighted cost of those removed insns.  */
static unsigned HOST_WIDE_INT deleted_def_cost;
/* In rewrite_program2() we noticed when a certain insn set a pseudo
   which wasn't live afterwards.  Try to delete all those insns.  */
/* NOTE(review): reconstructed from a garbled extraction; lines that held
   only braces or declarations were restored — verify against upstream.  */
static void
delete_useless_defs ()
{
  unsigned int i;
  /* If the insn only sets the def without any sideeffect (besides
     clobbers or uses), we can delete it.  single_set() also tests
     for INSN_P(insn).  */
  EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i,
    {
      rtx insn = DF_REF_INSN (df->defs[i]);
      rtx set = single_set (insn);
      struct web *web = find_web_for_subweb (def2web[i]);
      /* Only delete the def if its web was spilled without a stack slot,
	 i.e. nothing will ever read this value back.  */
      if (set && web->type == SPILLED && web->stack_slot == NULL)
	{
	  deleted_def_insns++;
	  deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
	  /* Turn the insn into a deleted note instead of removing it,
	     and tell the dataflow framework it changed.  */
	  PUT_CODE (insn, NOTE);
	  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
	  df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
	}
    });
}
/* Look for spilled webs, on whose behalf no insns were emitted.
   We invert the changed flag of the webs, so after this function
   a nonzero changed flag means that this web was not spillable (at
   least in this pass).  */
/* NOTE(review): reconstructed from a garbled extraction; the if/else
   skeleton and the `changed` flag flips were restored from context —
   verify against upstream.  */
static void
detect_non_changed_webs ()
{
  struct dlist *d, *d_next;
  /* Use d_next because remove_web_from_list() unlinks D from the list.  */
  for (d = WEBS(SPILLED); d; d = d_next)
    {
      struct web *web = DLIST_WEB (d);
      d_next = d->next;
      if (!web->changed)
	{
	  ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
			web->id);
	  /* No spill insns were emitted: treat the web as colored.  */
	  remove_web_from_list (web);
	  put_web (web, COLORED);
	  web->changed = 1;
	}
      else
	web->changed = 0;
      /* From now on web->changed is used as the opposite flag.
	 I.e. colored webs, which have changed set were formerly
	 spilled webs for which no insns were emitted.  */
    }
}
1640 /* Before spilling we clear the changed flags for all spilled webs. */
/* Clear the changed flag of every web on the SPILLED list, so the spill
   pass can record which webs actually had insns emitted for them.  */
static void
reset_changed_flag ()
{
  struct dlist *d;
  for (d = WEBS(SPILLED); d; d = d->next)
    DLIST_WEB(d)->changed = 0;
}
1650 /* The toplevel function for this file. Given a colorized graph,
1651 and lists of spilled, coalesced and colored webs, we add some
1652 spill code. This also sets up the structures for incrementally
1653 building the interference graph in the next pass. */
/* NOTE(review): the function header was lost in extraction; restored as
   `void actual_spill ()` per the file's toplevel-function comment above.
   Verify against upstream.  */
void
actual_spill ()
{
  int i;
  /* Collects the UIDs of insns which contain deaths after spilling.  */
  bitmap new_deaths = BITMAP_XMALLOC ();
  reset_changed_flag ();
  /* NOTE(review): a dropped line here presumably called spill_coalprop()
     (prototype exists at top of file) — confirm against upstream.  */
  spill_coalprop ();
  choose_spill_colors ();
  useless_defs = BITMAP_XMALLOC ();
  /* Emit the actual spill code, with either the improved or the basic
     rewriter.  */
  if (flag_ra_improved_spilling)
    rewrite_program2 (new_deaths);
  else
    rewrite_program (new_deaths);
  insert_stores (new_deaths);
  delete_useless_defs ();
  BITMAP_XFREE (useless_defs);
  /* Rebuild insns_with_deaths from the deaths noticed while spilling.  */
  sbitmap_free (insns_with_deaths);
  insns_with_deaths = sbitmap_alloc (get_max_uid ());
  death_insns_max_uid = get_max_uid ();
  sbitmap_zero (insns_with_deaths);
  EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i,
    { SET_BIT (insns_with_deaths, i);});
  detect_non_changed_webs ();
  detect_web_parts_to_rebuild ();
  BITMAP_XFREE (new_deaths);
}
/* A bitmap of pseudo reg numbers which are coalesced directly
   to a hardreg.  Set in emit_colors(), used and freed in
   remove_suspicious_death_notes().  */
static bitmap regnos_coalesced_to_hardregs;
1687 /* Create new pseudos for each web we colored, change insns to
1688 use those pseudos and set up ra_reg_renumber. */
1697 int old_max_regno
= max_reg_num ();
1701 /* This bitmap is freed in remove_suspicious_death_notes(),
1702 which is also the user of it. */
1703 regnos_coalesced_to_hardregs
= BITMAP_XMALLOC ();
1704 /* First create the (REG xx) rtx's for all webs, as we need to know
1705 the number, to make sure, flow has enough memory for them in the
1707 for (i
= 0; i
< num_webs
- num_subwebs
; i
++)
1710 if (web
->type
!= COLORED
&& web
->type
!= COALESCED
)
1712 if (web
->type
== COALESCED
&& alias (web
)->type
== COLORED
)
1714 if (web
->reg_rtx
|| web
->regno
< FIRST_PSEUDO_REGISTER
)
1717 if (web
->regno
>= max_normal_pseudo
)
1720 if (web
->color
== an_unusable_color
)
1722 unsigned int inherent_size
= PSEUDO_REGNO_BYTES (web
->regno
);
1723 unsigned int total_size
= MAX (inherent_size
, 0);
1724 place
= assign_stack_local (PSEUDO_REGNO_MODE (web
->regno
),
1726 inherent_size
== total_size
? 0 : -1);
1727 RTX_UNCHANGING_P (place
) =
1728 RTX_UNCHANGING_P (regno_reg_rtx
[web
->regno
]);
1729 set_mem_alias_set (place
, new_alias_set ());
1733 place
= gen_reg_rtx (PSEUDO_REGNO_MODE (web
->regno
));
1735 web
->reg_rtx
= place
;
1739 /* Special case for i386 'fix_truncdi_nomemory' insn.
1740 We must choose mode from insns not from PSEUDO_REGNO_MODE.
1741 Actual only for clobbered register. */
1742 if (web
->num_uses
== 0 && web
->num_defs
== 1)
1743 web
->reg_rtx
= gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web
->defs
[0])));
1745 web
->reg_rtx
= gen_reg_rtx (PSEUDO_REGNO_MODE (web
->regno
));
1746 /* Remember the different parts directly coalesced to a hardreg. */
1747 if (web
->type
== COALESCED
)
1748 bitmap_set_bit (regnos_coalesced_to_hardregs
, REGNO (web
->reg_rtx
));
1751 ra_max_regno
= max_regno
= max_reg_num ();
1752 allocate_reg_info (max_regno
, FALSE
, FALSE
);
1753 ra_reg_renumber
= (short *) xmalloc (max_regno
* sizeof (short));
1754 for (si
= 0; si
< max_regno
; si
++)
1755 ra_reg_renumber
[si
] = -1;
1757 /* Then go through all references, and replace them by a new
1758 pseudoreg for each web. All uses. */
1760 Beware: The order of replacements (first uses, then defs) matters only
1761 for read-mod-write insns, where the RTL expression for the REG is
1762 shared between def and use. For normal rmw insns we connected all such
1763 webs, i.e. both the use and the def (which are the same memory)
1764 there get the same new pseudo-reg, so order would not matter.
1765 _However_ we did not connect webs, were the read cycle was an
1766 uninitialized read. If we now would first replace the def reference
1767 and then the use ref, we would initialize it with a REG rtx, which
1768 gets never initialized, and yet more wrong, which would overwrite
1769 the definition of the other REG rtx. So we must replace the defs last.
1771 for (i
= 0; i
< df
->use_id
; i
++)
1774 regset rs
= DF_REF_BB (df
->uses
[i
])->global_live_at_start
;
1777 web
= find_web_for_subweb (web
);
1778 if (web
->type
!= COLORED
&& web
->type
!= COALESCED
)
1780 regrtx
= alias (web
)->reg_rtx
;
1782 regrtx
= web
->reg_rtx
;
1783 *DF_REF_REAL_LOC (df
->uses
[i
]) = regrtx
;
1784 if (REGNO_REG_SET_P (rs
, web
->regno
) && REG_P (regrtx
))
1786 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1787 SET_REGNO_REG_SET (rs
, REGNO (regrtx
));
1792 for (i
= 0; i
< df
->def_id
; i
++)
1798 rs
= DF_REF_BB (df
->defs
[i
])->global_live_at_start
;
1800 web
= find_web_for_subweb (web
);
1801 if (web
->type
!= COLORED
&& web
->type
!= COALESCED
)
1803 regrtx
= alias (web
)->reg_rtx
;
1805 regrtx
= web
->reg_rtx
;
1806 *DF_REF_REAL_LOC (df
->defs
[i
]) = regrtx
;
1807 if (REGNO_REG_SET_P (rs
, web
->regno
) && REG_P (regrtx
))
1809 /* Don't simply clear the current regno, as it might be
1810 replaced by two webs. */
1811 /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
1812 SET_REGNO_REG_SET (rs
, REGNO (regrtx
));
1816 /* And now set up the ra_reg_renumber array for reload with all the new
1818 for (i
= 0; i
< num_webs
- num_subwebs
; i
++)
1821 if (web
->reg_rtx
&& REG_P (web
->reg_rtx
))
1823 int r
= REGNO (web
->reg_rtx
);
1824 ra_reg_renumber
[r
] = web
->color
;
1825 ra_debug_msg (DUMP_COLORIZE
, "Renumber pseudo %d (== web %d) to %d\n",
1826 r
, web
->id
, ra_reg_renumber
[r
]);
1830 old_regs
= BITMAP_XMALLOC ();
1831 for (si
= FIRST_PSEUDO_REGISTER
; si
< old_max_regno
; si
++)
1832 SET_REGNO_REG_SET (old_regs
, si
);
1835 AND_COMPL_REG_SET (bb
->global_live_at_start
, old_regs
);
1836 AND_COMPL_REG_SET (bb
->global_live_at_end
, old_regs
);
1838 BITMAP_XFREE (old_regs
);
1841 /* Delete some coalesced moves from the insn stream. */
/* NOTE(review): the function header and the declaration of S and T were
   lost in extraction; restored per the forward use of `s`/`t` below —
   verify against upstream.  */
static void
delete_moves ()
{
  struct move_list *ml;
  struct web *s, *t;
  /* XXX Beware: We normally would test here each copy insn, if
     source and target got the same color (either by coalescing or by pure
     luck), and then delete it.
     This will currently not work.  One problem is, that we don't color
     the regs ourself, but instead defer to reload.  So the colorization
     is only a kind of suggestion, which reload doesn't have to follow.
     For webs which are coalesced to a normal colored web, we only have one
     new pseudo, so in this case we indeed can delete copy insns involving
     those (because even if reload colors them different from our suggestion,
     it still has to color them the same, as only one pseudo exists).  But for
     webs coalesced to precolored ones, we have not a single pseudo, but
     instead one for each coalesced web.  This means, that we can't delete
     copy insns, where source and target are webs coalesced to precolored
     ones, because then the connection between both webs is destroyed.  Note
     that this not only means copy insns, where one side is the precolored one
     itself, but also those between webs which are coalesced to one color.
     Also because reload we can't delete copy insns which involve any
     precolored web at all.  These often have also special meaning (e.g.
     copying a return value of a call to a pseudo, or copying pseudo to the
     return register), and the deletion would confuse reload in thinking the
     pseudo isn't needed.  One of those days reload will get away and we can
     do everything we want.
     In effect because of the later reload, we can't base our deletion on the
     colors itself, but instead need to base them on the newly created
     pseudos.  */
  for (ml = wl_moves; ml; ml = ml->next)
    /* The real condition we would ideally use is: s->color == t->color.
       Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
       we want to prevent deletion of "special" copies.  */
    if (ml->move
	&& (s = alias (ml->move->source_web))->reg_rtx
	   == (t = alias (ml->move->target_web))->reg_rtx
	&& s->type != PRECOLORED && t->type != PRECOLORED)
      {
	basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
	df_insn_delete (df, bb, ml->move->insn);
	deleted_move_insns++;
	deleted_move_cost += bb->frequency + 1;
      }
}
1889 /* Due to reasons documented elsewhere we create different pseudos
1890 for all webs coalesced to hardregs. For these parts life_analysis()
1891 might have added REG_DEAD notes without considering, that only this part
1892 but not the whole coalesced web dies. The RTL is correct, there is no
1893 coalescing yet. But if later reload's alter_reg() substitutes the
1894 hardreg into the REG rtx it looks like that particular hardreg dies here,
1895 although (due to coalescing) it still is live. This might make different
1896 places of reload think, it can use that hardreg for reload regs,
1897 accidentally overwriting it. So we need to remove those REG_DEAD notes.
1898 (Or better teach life_analysis() and reload about our coalescing, but
1899 that comes later) Bah. */
/* NOTE(review): reconstructed from a garbled extraction; the loop skeleton
   was restored and the mis-encoded `®_NOTES` repaired to `&REG_NOTES` —
   verify against upstream.  */
void
remove_suspicious_death_notes ()
{
  rtx insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	rtx *pnote = &REG_NOTES (insn);
	while (*pnote)
	  {
	    rtx note = *pnote;
	    /* Drop REG_DEAD/REG_UNUSED notes mentioning a pseudo that was
	       coalesced directly to a hardreg; after reload's alter_reg()
	       such a note would falsely claim the hardreg dies here.  */
	    if ((REG_NOTE_KIND (note) == REG_DEAD
		 || REG_NOTE_KIND (note) == REG_UNUSED)
		&& (GET_CODE (XEXP (note, 0)) == REG
		    && bitmap_bit_p (regnos_coalesced_to_hardregs,
				     REGNO (XEXP (note, 0)))))
	      *pnote = XEXP (note, 1);
	    else
	      pnote = &XEXP (*pnote, 1);
	  }
      }
  BITMAP_XFREE (regnos_coalesced_to_hardregs);
  regnos_coalesced_to_hardregs = NULL;
}
1926 /* Allocate space for max_reg_num() pseudo registers, and
1927 fill reg_renumber[] from ra_reg_renumber[]. If FREE_IT
1928 is nonzero, also free ra_reg_renumber and reset ra_max_regno. */
/* NOTE(review): reconstructed from a garbled extraction; dropped
   declaration and brace lines restored — verify against upstream.  */
void
setup_renumber (free_it)
     int free_it;
{
  int i;
  max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, TRUE);
  /* Copy our suggested colors into reload's reg_renumber[]; pseudos we
     know nothing about get -1 (no hard register).  */
  for (i = 0; i < max_regno; i++)
    reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
  if (free_it)
    {
      free (ra_reg_renumber);
      ra_reg_renumber = NULL;
      ra_max_regno = 0;
    }
}
1949 /* Dump the costs and savings due to spilling, i.e. of added spill insns
1950 and removed moves or useless defs. */
/* NOTE(review): the function header was lost in extraction; restored as
   `dump_cost (level)` per the uses of `level` below — verify against
   upstream.  */
void
dump_cost (level)
     unsigned int level;
{
  ra_debug_msg (level, "Instructions for spilling\n added:\n");
  ra_debug_msg (level, " loads =%d cost=", emitted_spill_loads);
  ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, spill_load_cost);
  ra_debug_msg (level, "\n stores=%d cost=", emitted_spill_stores);
  ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, spill_store_cost);
  ra_debug_msg (level, "\n remat =%d cost=", emitted_remat);
  ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, spill_remat_cost);
  ra_debug_msg (level, "\n removed:\n moves =%d cost=", deleted_move_insns);
  ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, deleted_move_cost);
  ra_debug_msg (level, "\n others=%d cost=", deleted_def_insns);
  ra_debug_msg (level, HOST_WIDE_INT_PRINT_UNSIGNED, deleted_def_cost);
  ra_debug_msg (level, "\n");
}
1970 /* Initialization of the rewrite phase. */
/* NOTE(review): the function header was lost in extraction; restored as
   `ra_rewrite_init ()` per the file comment above.  A dropped line between
   the store and load-cost resets presumably cleared emitted_remat —
   confirm against upstream.  */
void
ra_rewrite_init ()
{
  /* Reset all spill/deletion statistics for this round of the allocator.  */
  emitted_spill_loads = 0;
  emitted_spill_stores = 0;
  emitted_remat = 0;
  spill_load_cost = 0;
  spill_store_cost = 0;
  spill_remat_cost = 0;
  deleted_move_insns = 0;
  deleted_move_cost = 0;
  deleted_def_insns = 0;
  deleted_def_cost = 0;
}
1988 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: