/* Graph coloring register allocator
   Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
   Contributed by Michael Matz <matz@suse.de>
   and Daniel Berlin <dan@cgsoftware.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under the
   terms of the GNU General Public License as published by the Free Software
   Foundation; either version 2, or (at your option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
   details.

   You should have received a copy of the GNU General Public License along
   with GCC; see the file COPYING.  If not, write to the Free Software
   Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "df.h"
#include "insn-config.h"
#include "ra.h"
/* This file is part of the graph coloring register allocator, and
   contains the functions to change the insn stream.  I.e. it adds
   spill code, rewrites insns to use the new registers after
   coloring and deletes coalesced moves.  */
static void spill_coalescing (sbitmap, sbitmap);
static unsigned HOST_WIDE_INT spill_prop_savings (struct web *, sbitmap);
static void spill_prop_insert (struct web *, sbitmap, sbitmap);
static int spill_propagation (sbitmap, sbitmap, sbitmap);
static void spill_coalprop (void);
static void allocate_spill_web (struct web *);
static void choose_spill_colors (void);
static void rewrite_program (bitmap);
static void remember_slot (struct rtx_list **, rtx);
static int slots_overlap_p (rtx, rtx);
static void delete_overlapping_slots (struct rtx_list **, rtx);
static int slot_member_p (struct rtx_list *, rtx);
static void insert_stores (bitmap);
static int spill_same_color_p (struct web *, struct web *);
static bool is_partly_live_1 (sbitmap, struct web *);
static void update_spill_colors (HARD_REG_SET *, struct web *, int);
static int spill_is_free (HARD_REG_SET *, struct web *);
static void emit_loads (struct rewrite_info *, int, rtx);
static void reloads_to_loads (struct rewrite_info *, struct ref **,
			      unsigned int, struct web **);
static void rewrite_program2 (bitmap);
static void mark_refs_for_checking (struct web *, bitmap);
static void detect_web_parts_to_rebuild (void);
static void delete_useless_defs (void);
static void detect_non_changed_webs (void);
static void reset_changed_flag (void);
/* For tracking some statistics, we count the number (and cost)
   of deleted move insns.  */
static unsigned int deleted_move_insns;
static unsigned HOST_WIDE_INT deleted_move_cost;
/* This is the spill coalescing phase.  In SPILLED the IDs of all
   already spilled webs are noted.  In COALESCED the IDs of webs still
   to check for coalescing.  This tries to coalesce two webs, which were
   spilled, are connected by a move, and don't conflict.  Greatly
   reduces memory shuffling.  */

static void
spill_coalescing (sbitmap coalesce, sbitmap spilled)
{
  struct move_list *ml;
  struct move *m;
  for (ml = wl_moves; ml; ml = ml->next)
    if ((m = ml->move) != NULL)
      {
	struct web *s = alias (m->source_web);
	struct web *t = alias (m->target_web);
	if ((TEST_BIT (spilled, s->id) && TEST_BIT (coalesce, t->id))
	    || (TEST_BIT (spilled, t->id) && TEST_BIT (coalesce, s->id)))
	  {
	    struct conflict_link *wl;
	    if (TEST_BIT (sup_igraph, s->id * num_webs + t->id)
		|| TEST_BIT (sup_igraph, t->id * num_webs + s->id)
		|| s->pattern || t->pattern)
	      continue;

	    deleted_move_insns++;
	    deleted_move_cost += BLOCK_FOR_INSN (m->insn)->frequency + 1;
	    PUT_CODE (m->insn, NOTE);
	    NOTE_LINE_NUMBER (m->insn) = NOTE_INSN_DELETED;
	    df_insn_modify (df, BLOCK_FOR_INSN (m->insn), m->insn);

	    m->target_web->target_of_spilled_move = 1;
	    if (s == t)
	      /* May be, already coalesced due to a former move.  */
	      continue;

	    /* Merge the nodes S and T in the I-graph.  Beware: the merging
	       of conflicts relies on the fact, that in the conflict list
	       of T all of it's conflicts are noted.  This is currently not
	       the case if T would be the target of a coalesced web, because
	       then (in combine () above) only those conflicts were noted in
	       T from the web which was coalesced into T, which at the time
	       of combine() were not already on the SELECT stack or were
	       itself coalesced to something other.  */
	    if (t->type != SPILLED || s->type != SPILLED)
	      abort ();
	    remove_list (t->dlink, &WEBS(SPILLED));
	    put_web (t, COALESCED);
	    t->alias = s;

	    for (wl = t->conflict_list; wl; wl = wl->next)
	      {
		struct web *pweb = wl->t;
		if (wl->sub == NULL)
		  record_conflict (s, pweb);
		else
		  {
		    struct sub_conflict *sl;
		    for (sl = wl->sub; sl; sl = sl->next)
		      {
			struct web *sweb = NULL;
			if (SUBWEB_P (sl->s))
			  sweb = find_subweb (s, sl->s->orig_x);
			if (!sweb)
			  sweb = s;
			record_conflict (sweb, sl->t);
		      }
		  }
		/* No decrement_degree here, because we already have colored
		   the graph, and don't want to insert pweb into any other
		   list.  */
		pweb->num_conflicts -= 1 + t->add_hardregs;
	      }
	  }
      }
}
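
/* Illustrative sketch (an assumption for exposition, not code from the
   original allocator): SUP_IGRAPH above acts as a flat sbitmap holding
   one bit per ordered pair of web IDs, so the conflict test used in
   spill_coalescing() and spill_prop_savings() checks the pair in both
   directions.  A hypothetical helper wrapping that test could look
   like this.  */
#if 0
static int
webs_conflict_p (struct web *s, struct web *t)
{
  /* The bit index of the ordered pair (s, t) is s->id * num_webs + t->id.  */
  return TEST_BIT (sup_igraph, s->id * num_webs + t->id)
	 || TEST_BIT (sup_igraph, t->id * num_webs + s->id);
}
#endif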
/* Returns the probable saving of coalescing WEB with webs from
   SPILLED, in terms of removed move insn cost.  */

static unsigned HOST_WIDE_INT
spill_prop_savings (struct web *web, sbitmap spilled)
{
  unsigned HOST_WIDE_INT savings = 0;
  struct move_list *ml;
  struct move *m;
  unsigned HOST_WIDE_INT cost;
  cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 1);
  cost += 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x), web->regclass, 0);
  for (ml = wl_moves; ml; ml = ml->next)
    if ((m = ml->move) != NULL)
      {
	struct web *s = alias (m->source_web);
	struct web *t = alias (m->target_web);
	if (s != web)
	  {
	    struct web *h = s;
	    s = t;
	    t = h;
	  }
	if (s != web || !TEST_BIT (spilled, t->id) || t->pattern
	    || TEST_BIT (sup_igraph, s->id * num_webs + t->id)
	    || TEST_BIT (sup_igraph, t->id * num_webs + s->id))
	  continue;
	savings += BLOCK_FOR_INSN (m->insn)->frequency * cost;
      }
  return savings;
}
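
/* Worked example (illustrative, with made-up numbers): if storing and
   reloading WEB's mode in its register class each cost 4, the COST
   computed above is (1 + 4) + (1 + 4) = 10, and a coalescable move
   executed in a block with frequency 7 adds 7 * 10 = 70 to SAVINGS.  */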
/* This adds all IDs of colored webs, which are connected to WEB by a move,
   to LIST and PROCESSED.  */

static void
spill_prop_insert (struct web *web, sbitmap list, sbitmap processed)
{
  struct move_list *ml;
  struct move *m;
  for (ml = wl_moves; ml; ml = ml->next)
    if ((m = ml->move) != NULL)
      {
	struct web *s = alias (m->source_web);
	struct web *t = alias (m->target_web);
	if (s != web)
	  {
	    struct web *h = s;
	    s = t;
	    t = h;
	  }
	if (s != web || t->type != COLORED || TEST_BIT (processed, t->id))
	  continue;
	SET_BIT (list, t->id);
	SET_BIT (processed, t->id);
      }
}
/* The spill propagation pass.  If we have two spilled webs, the first
   connected through a move to a colored one, and the second also connected
   to that colored one, and this colored web is only used to connect both
   spilled webs, it might be worthwhile to spill that colored one.
   This is the case, if the cost of the removed copy insns (all three webs
   could be placed into the same stack slot) is higher than the spill cost
   of that colored web.
   TO_PROP are the webs we try to propagate from (i.e. spilled ones),
   SPILLED the set of all spilled webs so far and PROCESSED the set
   of all webs processed so far, so we don't do work twice.  */

static int
spill_propagation (sbitmap to_prop, sbitmap spilled, sbitmap processed)
{
  int id;
  int again = 0;
  sbitmap list = sbitmap_alloc (num_webs);
  sbitmap_zero (list);

  /* First insert colored move neighbors into the candidate list.  */
  EXECUTE_IF_SET_IN_SBITMAP (to_prop, 0, id,
    {
      spill_prop_insert (ID2WEB (id), list, processed);
    });
  sbitmap_zero (to_prop);

  /* For all candidates, see, if the savings are higher than its
     spill cost.  */
  while ((id = sbitmap_first_set_bit (list)) >= 0)
    {
      struct web *web = ID2WEB (id);
      RESET_BIT (list, id);
      if (spill_prop_savings (web, spilled) >= web->spill_cost)
	{
	  /* If so, we found a new spilled web.  Insert its colored
	     move neighbors again, and mark, that we need to repeat the
	     whole mainloop of spill propagation/coalescing again.  */
	  remove_web_from_list (web);
	  put_web (web, SPILLED);
	  SET_BIT (spilled, id);
	  SET_BIT (to_prop, id);
	  spill_prop_insert (web, list, processed);
	  again = 1;
	}
    }
  sbitmap_free (list);
  return again;
}
/* The main phase to improve spill costs.  This repeatedly runs
   spill coalescing and spill propagation, until nothing changes.  */

static void
spill_coalprop (void)
{
  sbitmap spilled, processed, to_prop;
  struct dlist *d;
  int again;
  spilled = sbitmap_alloc (num_webs);
  processed = sbitmap_alloc (num_webs);
  to_prop = sbitmap_alloc (num_webs);
  sbitmap_zero (spilled);
  for (d = WEBS(SPILLED); d; d = d->next)
    SET_BIT (spilled, DLIST_WEB (d)->id);
  sbitmap_copy (to_prop, spilled);
  sbitmap_zero (processed);
  do
    {
      spill_coalescing (to_prop, spilled);
      /* XXX Currently (with optimistic coalescing) spill_propagation()
	 doesn't give better code, sometimes it gives worse (but not by much)
	 code.  I believe this is because of slightly wrong cost
	 measurements.  Anyway right now it isn't worth the time it takes,
	 so deactivate it for now.  */
      again = 0 && spill_propagation (to_prop, spilled, processed);
    }
  while (again);
  sbitmap_free (to_prop);
  sbitmap_free (processed);
  sbitmap_free (spilled);
}
/* Allocate a spill slot for a WEB.  Currently we spill to pseudo
   registers, to be able to track also webs for "stack slots", and also
   to possibly colorize them.  These pseudos are sometimes handled
   in a special way, where we know, that they also can represent
   stack slots.  */

static void
allocate_spill_web (struct web *web)
{
  int regno = web->regno;
  rtx slot;
  if (web->stack_slot)
    return;
  slot = gen_reg_rtx (PSEUDO_REGNO_MODE (regno));
  web->stack_slot = slot;
}
/* This chooses a color for all SPILLED webs for interference region
   spilling.  The heuristic isn't good in any way.  */

static void
choose_spill_colors (void)
{
  struct dlist *d;
  unsigned HOST_WIDE_INT *costs = xmalloc (FIRST_PSEUDO_REGISTER
					   * sizeof (costs[0]));
  for (d = WEBS(SPILLED); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      struct conflict_link *wl;
      int bestc, c;
      HARD_REG_SET avail;
      memset (costs, 0, FIRST_PSEUDO_REGISTER * sizeof (costs[0]));
      for (wl = web->conflict_list; wl; wl = wl->next)
	{
	  struct web *pweb = wl->t;
	  if (pweb->type == COLORED || pweb->type == PRECOLORED)
	    costs[pweb->color] += pweb->spill_cost;
	}

      COPY_HARD_REG_SET (avail, web->usable_regs);
      if (web->crosses_call)
	{
	  /* Add an arbitrary constant cost to colors not usable by
	     call-crossing webs without saves/loads.  */
	  for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
	    if (TEST_HARD_REG_BIT (call_used_reg_set, c))
	      costs[c] += 1000;
	}
      bestc = -1;
      for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
	if ((bestc < 0 || costs[bestc] > costs[c])
	    && TEST_HARD_REG_BIT (avail, c)
	    && HARD_REGNO_MODE_OK (c, PSEUDO_REGNO_MODE (web->regno)))
	  {
	    int i, size;
	    size = HARD_REGNO_NREGS (c, PSEUDO_REGNO_MODE (web->regno));
	    for (i = 1; i < size
		 && TEST_HARD_REG_BIT (avail, c + i); i++)
	      ;
	    if (i == size)
	      bestc = c;
	  }
      web->color = bestc;
      ra_debug_msg (DUMP_PROCESS, "choosing color %d for spilled web %d\n",
		    bestc, web->id);
    }

  free (costs);
}
/* For statistics sake we count the number and cost of all new loads,
   stores and emitted rematerializations.  */
static unsigned int emitted_spill_loads;
static unsigned int emitted_spill_stores;
static unsigned int emitted_remat;
static unsigned HOST_WIDE_INT spill_load_cost;
static unsigned HOST_WIDE_INT spill_store_cost;
static unsigned HOST_WIDE_INT spill_remat_cost;

/* In rewrite_program2() we detect if some def is useless, in the sense,
   that the pseudo set is not live anymore at that point.  The REF_IDs
   of such defs are noted here.  */
static bitmap useless_defs;
/* This is the simple and fast version of rewriting the program to
   include spill code.  It spills at every insn containing spilled
   defs or uses.  Loads are added only if flag_ra_spill_every_use is
   nonzero, otherwise only stores will be added.  This doesn't
   support rematerialization.
   NEW_DEATHS is filled with uids for insns, which probably contain
   deaths.  */

static void
rewrite_program (bitmap new_deaths)
{
  unsigned int i;
  struct dlist *d;
  bitmap b = BITMAP_XMALLOC ();

  /* We walk over all webs, over all uses/defs.  For all webs, we need
     to look at spilled webs, and webs coalesced to spilled ones, in case
     their alias isn't broken up, or they got spill coalesced.  */
  for (i = 0; i < 2; i++)
    for (d = (i == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
      {
	struct web *web = DLIST_WEB (d);
	struct web *aweb = alias (web);
	unsigned int j;
	rtx slot;

	/* Is trivially true for spilled webs, but not for coalesced ones.  */
	if (aweb->type != SPILLED)
	  continue;

	/* First add loads before every use, if we have to.  */
	if (flag_ra_spill_every_use)
	  {
	    bitmap_clear (b);
	    allocate_spill_web (aweb);
	    slot = aweb->stack_slot;
	    for (j = 0; j < web->num_uses; j++)
	      {
		rtx insns, target, source;
		rtx insn = DF_REF_INSN (web->uses[j]);
		rtx prev = PREV_INSN (insn);
		basic_block bb = BLOCK_FOR_INSN (insn);
		/* Happens when spill_coalescing() deletes move insns.  */
		if (!INSN_P (insn))
		  continue;

		/* Check that we didn't already add a load for this web
		   and insn.  Happens, when an insn uses the same web
		   multiple times.  */
		if (bitmap_bit_p (b, INSN_UID (insn)))
		  continue;
		bitmap_set_bit (b, INSN_UID (insn));
		target = DF_REF_REG (web->uses[j]);
		source = slot;
		start_sequence ();
		if (GET_CODE (target) == SUBREG)
		  source = simplify_gen_subreg (GET_MODE (target), source,
						GET_MODE (source),
						SUBREG_BYTE (target));
		ra_emit_move_insn (target, source);
		insns = get_insns ();
		end_sequence ();
		emit_insn_before (insns, insn);

		if (BB_HEAD (bb) == insn)
		  BB_HEAD (bb) = NEXT_INSN (prev);
		for (insn = PREV_INSN (insn); insn != prev;
		     insn = PREV_INSN (insn))
		  {
		    set_block_for_insn (insn, bb);
		    df_insn_modify (df, bb, insn);
		  }

		emitted_spill_loads++;
		spill_load_cost += bb->frequency + 1;
	      }
	  }

	/* Now emit the stores after each def.
	   If any uses were loaded from stackslots (compared to
	   rematerialized or not reloaded due to IR spilling),
	   aweb->stack_slot will be set.  If not, we don't need to emit
	   any stores.  */
	slot = aweb->stack_slot;
	bitmap_clear (b);
	if (slot)
	  for (j = 0; j < web->num_defs; j++)
	    {
	      rtx insns, source, dest;
	      rtx insn = DF_REF_INSN (web->defs[j]);
	      rtx following = NEXT_INSN (insn);
	      basic_block bb = BLOCK_FOR_INSN (insn);
	      /* Happens when spill_coalescing() deletes move insns.  */
	      if (!INSN_P (insn))
		continue;
	      if (bitmap_bit_p (b, INSN_UID (insn)))
		continue;
	      bitmap_set_bit (b, INSN_UID (insn));
	      start_sequence ();
	      source = DF_REF_REG (web->defs[j]);
	      dest = slot;
	      if (GET_CODE (source) == SUBREG)
		dest = simplify_gen_subreg (GET_MODE (source), dest,
					    GET_MODE (dest),
					    SUBREG_BYTE (source));
	      ra_emit_move_insn (dest, source);

	      insns = get_insns ();
	      end_sequence ();
	      if (insns)
		{
		  emit_insn_after (insns, insn);
		  if (BB_END (bb) == insn)
		    BB_END (bb) = PREV_INSN (following);
		  for (insn = insns; insn != following;
		       insn = NEXT_INSN (insn))
		    {
		      set_block_for_insn (insn, bb);
		      df_insn_modify (df, bb, insn);
		    }
		}
	      else
		df_insn_modify (df, bb, insn);
	      emitted_spill_stores++;
	      spill_store_cost += bb->frequency + 1;
	      /* XXX we should set new_deaths for all inserted stores
		 whose pseudo dies here.
		 Note, that this isn't the case for _all_ stores.  */
	      /* I.e. the next is wrong, and might cause some spilltemps
		 to be categorized as spilltemp2's (i.e. live over a death),
		 although they aren't.  This might make them spill again,
		 which causes endlessness in the case, this insn is in fact
		 no death.  */
	      bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
	    }
      }

  BITMAP_XFREE (b);
}
/* A simple list of rtx's.  */
struct rtx_list
{
  struct rtx_list *next;
  rtx x;
};

/* Adds X to *LIST.  */

static void
remember_slot (struct rtx_list **list, rtx x)
{
  struct rtx_list *l;
  /* PRE: X is not already in LIST.  */
  l = ra_alloc (sizeof (*l));
  l->x = x;
  l->next = *list;
  *list = l;
}
/* Given two rtx' S1 and S2, either being REGs or MEMs (or SUBREGs
   thereof), return nonzero, if they overlap.  REGs and MEMs don't
   overlap, and if they are MEMs they must have an easy address
   (plus (basereg) (const_int x)), otherwise they overlap.  */

static int
slots_overlap_p (rtx s1, rtx s2)
{
  rtx base1, base2;
  HOST_WIDE_INT ofs1 = 0, ofs2 = 0;
  int size1 = GET_MODE_SIZE (GET_MODE (s1));
  int size2 = GET_MODE_SIZE (GET_MODE (s2));
  if (GET_CODE (s1) == SUBREG)
    ofs1 = SUBREG_BYTE (s1), s1 = SUBREG_REG (s1);
  if (GET_CODE (s2) == SUBREG)
    ofs2 = SUBREG_BYTE (s2), s2 = SUBREG_REG (s2);

  if (s1 == s2)
    return 1;

  if (GET_CODE (s1) != GET_CODE (s2))
    return 0;

  if (GET_CODE (s1) == REG && GET_CODE (s2) == REG)
    {
      if (REGNO (s1) != REGNO (s2))
	return 0;
      if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
	return 0;
      return 1;
    }
  if (GET_CODE (s1) != MEM || GET_CODE (s2) != MEM)
    abort ();
  s1 = XEXP (s1, 0);
  s2 = XEXP (s2, 0);
  if (GET_CODE (s1) != PLUS || GET_CODE (XEXP (s1, 0)) != REG
      || GET_CODE (XEXP (s1, 1)) != CONST_INT)
    return 1;
  if (GET_CODE (s2) != PLUS || GET_CODE (XEXP (s2, 0)) != REG
      || GET_CODE (XEXP (s2, 1)) != CONST_INT)
    return 1;
  base1 = XEXP (s1, 0);
  base2 = XEXP (s2, 0);
  if (!rtx_equal_p (base1, base2))
    return 1;
  ofs1 += INTVAL (XEXP (s1, 1));
  ofs2 += INTVAL (XEXP (s2, 1));
  if (ofs1 >= ofs2 + size2 || ofs2 >= ofs1 + size1)
    return 0;
  return 1;
}
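
/* Worked example (illustrative): two spill slots addressed as
   (plus (reg fp) (const_int 0)) with an 8 byte mode and
   (plus (reg fp) (const_int 4)) with a 4 byte mode share the base
   register; with ofs1 = 0, size1 = 8, ofs2 = 4, size2 = 4 neither
   ofs1 >= ofs2 + size2 nor ofs2 >= ofs1 + size1 holds, so
   slots_overlap_p() returns 1 (bytes 4..7 are covered by both).  */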
/* This deletes from *LIST all rtx's which overlap with X in the sense
   of slots_overlap_p().  */

static void
delete_overlapping_slots (struct rtx_list **list, rtx x)
{
  while (*list)
    {
      if (slots_overlap_p ((*list)->x, x))
	*list = (*list)->next;
      else
	list = &((*list)->next);
    }
}
/* Returns nonzero, if X is a member of LIST.  */

static int
slot_member_p (struct rtx_list *list, rtx x)
{
  for (; list; list = list->next)
    if (rtx_equal_p (list->x, x))
      return 1;
  return 0;
}
/* A more sophisticated (and slower) method of adding the stores, than
   rewrite_program().  This goes backward the insn stream, adding
   stores as it goes, but only if it hasn't just added a store to the
   same location.  NEW_DEATHS is a bitmap filled with uids of insns
   containing deaths.  */

static void
insert_stores (bitmap new_deaths)
{
  rtx insn;
  rtx last_slot = NULL_RTX;
  struct rtx_list *slots = NULL;

  /* We go simply backwards over basic block borders.  */
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    {
      int uid = INSN_UID (insn);

      /* If we reach a basic block border, which has more than one
	 outgoing edge, we simply forget all already emitted stores.  */
      if (GET_CODE (insn) == BARRIER
	  || JUMP_P (insn) || can_throw_internal (insn))
	{
	  last_slot = NULL_RTX;
	  slots = NULL;
	}
      if (!INSN_P (insn))
	continue;

      /* If this insn was not just added in this pass.  */
      if (uid < insn_df_max_uid)
	{
	  unsigned int n;
	  rtx following = NEXT_INSN (insn);
	  basic_block bb = BLOCK_FOR_INSN (insn);
	  struct ra_insn_info info;

	  info = insn_df[uid];
	  for (n = 0; n < info.num_defs; n++)
	    {
	      struct web *web = def2web[DF_REF_ID (info.defs[n])];
	      struct web *aweb = alias (find_web_for_subweb (web));
	      rtx slot, source;
	      if (aweb->type != SPILLED || !aweb->stack_slot)
		continue;
	      slot = aweb->stack_slot;
	      source = DF_REF_REG (info.defs[n]);
	      /* adjust_address() might generate code.  */
	      start_sequence ();
	      if (GET_CODE (source) == SUBREG)
		slot = simplify_gen_subreg (GET_MODE (source), slot,
					    GET_MODE (slot),
					    SUBREG_BYTE (source));
	      /* If we have no info about emitted stores, or it didn't
		 contain the location we intend to use soon, then
		 add the store.  */
	      if ((!last_slot || !rtx_equal_p (slot, last_slot))
		  && ! slot_member_p (slots, slot))
		{
		  rtx insns, ni;
		  last_slot = slot;
		  remember_slot (&slots, slot);
		  ra_emit_move_insn (slot, source);
		  insns = get_insns ();
		  end_sequence ();
		  if (insns)
		    {
		      emit_insn_after (insns, insn);
		      if (BB_END (bb) == insn)
			BB_END (bb) = PREV_INSN (following);
		      for (ni = insns; ni != following; ni = NEXT_INSN (ni))
			{
			  set_block_for_insn (ni, bb);
			  df_insn_modify (df, bb, ni);
			}
		    }
		  else
		    df_insn_modify (df, bb, insn);
		  emitted_spill_stores++;
		  spill_store_cost += bb->frequency + 1;
		  bitmap_set_bit (new_deaths, INSN_UID (PREV_INSN (following)));
		}
	      else
		/* Otherwise ignore insns from adjust_address() above.  */
		end_sequence ();
	    }
	}

      /* If we look at a load generated by the allocator, forget
	 the last emitted slot, and additionally clear all slots
	 overlapping it's source (after all, we need it again).  */
      /* XXX If we emit the stack-ref directly into the using insn the
	 following needs a change, because that is no new insn.  Preferably
	 we would add some notes to the insn, what stackslots are needed
	 for it.  */
      if (uid >= last_max_uid)
	{
	  rtx set = single_set (insn);
	  last_slot = NULL_RTX;
	  /* If this was no simple set, give up, and forget everything.  */
	  if (!set)
	    slots = NULL;
	  else
	    {
	      if (1 || GET_CODE (SET_SRC (set)) == MEM)
		delete_overlapping_slots (&slots, SET_SRC (set));
	    }
	}
    }
}
/* Returns 1 if both colored webs have some hardregs in common, even if
   they are not the same width.  */

static int
spill_same_color_p (struct web *web1, struct web *web2)
{
  int c1, size1, c2, size2;
  if ((c1 = alias (web1)->color) < 0 || c1 == an_unusable_color)
    return 0;
  if ((c2 = alias (web2)->color) < 0 || c2 == an_unusable_color)
    return 0;

  size1 = web1->type == PRECOLORED
	  ? 1 : HARD_REGNO_NREGS (c1, PSEUDO_REGNO_MODE (web1->regno));
  size2 = web2->type == PRECOLORED
	  ? 1 : HARD_REGNO_NREGS (c2, PSEUDO_REGNO_MODE (web2->regno));
  if (c1 >= c2 + size2 || c2 >= c1 + size1)
    return 0;
  return 1;
}
/* Given the set of live web IDs LIVE, returns nonzero, if any of WEB's
   subwebs (or WEB itself) is live.  */

static bool
is_partly_live_1 (sbitmap live, struct web *web)
{
  do
    if (TEST_BIT (live, web->id))
      return 1;
  while ((web = web->subreg_next));
  return 0;
}

/* Fast version in case WEB has no subwebs.  */
#define is_partly_live(live, web) ((!web->subreg_next) \
				   ? TEST_BIT (live, web->id) \
				   : is_partly_live_1 (live, web))
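
/* Illustrative note: the chain walked above is web, web->subreg_next,
   web->subreg_next->subreg_next, ...  E.g. a DImode web whose two SImode
   halves got their own subwebs counts as "partly live" as soon as the ID
   of any of those entries is set in LIVE.  */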
/* Change the set of currently IN_USE colors according to
   WEB's color.  Either add those colors to the hardreg set (if ADD
   is nonzero), or remove them.  */

static void
update_spill_colors (HARD_REG_SET *in_use, struct web *web, int add)
{
  int c, size;
  if ((c = alias (find_web_for_subweb (web))->color) < 0
      || c == an_unusable_color)
    return;
  size = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
  if (SUBWEB_P (web))
    c += subreg_regno_offset (c, GET_MODE (SUBREG_REG (web->orig_x)),
			      SUBREG_BYTE (web->orig_x),
			      GET_MODE (web->orig_x));
  else if (web->type == PRECOLORED)
    size = 1;
  if (add)
    while (size--)
      SET_HARD_REG_BIT (*in_use, c + size);
  else
    while (size--)
      CLEAR_HARD_REG_BIT (*in_use, c + size);
}
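
/* Illustrative example: a non-precolored web colored with hard register
   4 whose mode occupies two hard registers makes update_spill_colors()
   set (with ADD nonzero) or clear (with ADD zero) the bits for
   registers 4 and 5 in *IN_USE.  */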
/* Given a set of hardregs currently IN_USE and the color C of WEB,
   return -1 if WEB has no color, 1 if it has the unusable color,
   0 if one of its used hardregs is in use, and 1 otherwise.
   Generally, if WEB can't be left colorized return 1.  */

static int
spill_is_free (HARD_REG_SET *in_use, struct web *web)
{
  int c, size;
  if ((c = alias (web)->color) < 0)
    return -1;
  if (c == an_unusable_color)
    return 1;
  size = web->type == PRECOLORED
	 ? 1 : HARD_REGNO_NREGS (c, PSEUDO_REGNO_MODE (web->regno));
  while (size--)
    if (TEST_HARD_REG_BIT (*in_use, c + size))
      return 0;
  return 1;
}
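
/* Example of the convention above: callers such as reloads_to_loads()
   and rewrite_program2() treat spill_is_free() <= 0 as "this web cannot
   keep its color here", i.e. it either has no color at all (-1) or one
   of its hard registers is already occupied (0).  */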
/* Structure for passing between rewrite_program2() and emit_loads().  */
struct rewrite_info
{
  /* The web IDs which currently would need a reload.  These are
     currently live spilled webs, whose color was still free.  */
  bitmap need_reload;
  /* We need a scratch bitmap, but don't want to allocate one a zillion
     times.  */
  bitmap scratch;
  /* Web IDs of currently live webs.  These are the precise IDs,
     not just those of the superwebs.  If only one part is live, only
     that ID is placed here.  */
  sbitmap live;
  /* An array of webs, which currently need a load added.
     They will be emitted when seeing the first death.  */
  struct web **needed_loads;
  /* The current number of entries in needed_loads.  */
  int nl_size;
  /* The number of bits set in need_reload.  */
  int num_reloads;
  /* The current set of hardregs not available.  */
  HARD_REG_SET colors_in_use;
  /* Nonzero, if we just added some spill temps to need_reload or
     needed_loads.  In this case we don't wait for the next death
     to emit their loads.  */
  int any_spilltemps_spilled;
  /* Nonzero, if we currently need to emit the loads.  E.g. when we
     saw an insn containing deaths.  */
  int need_load;
};
/* The needed_loads list of RI contains some webs for which
   we add the actual load insns here.  They are added just before
   their use last seen.  NL_FIRST_RELOAD is the index of the first
   load which is a converted reload, all other entries are normal
   loads.  LAST_BLOCK_INSN is the last insn of the current basic block.  */

static void
emit_loads (struct rewrite_info *ri, int nl_first_reload, rtx last_block_insn)
{
  int j;
  for (j = ri->nl_size; j;)
    {
      struct web *web = ri->needed_loads[--j];
      struct web *supweb;
      struct web *aweb;
      rtx ni, slot, reg;
      rtx before = NULL_RTX, after = NULL_RTX;
      basic_block bb;

      /* When spilltemps were spilled for the last insns, their
	 loads already are emitted, which is noted by setting
	 needed_loads[] for it to 0.  */
      if (!web)
	continue;
      supweb = find_web_for_subweb (web);
      if (supweb->regno >= max_normal_pseudo)
	abort ();

      /* Check for web being a spilltemp, if we only want to
	 load spilltemps.  Also remember, that we emitted that
	 load, which we don't need to do when we have a death,
	 because then all of needed_loads[] is emptied.  */
      if (ri->need_load == 2)
	{
	  if (!supweb->spill_temp)
	    continue;
	  ri->needed_loads[j] = 0;
	}

      /* The adding of reloads doesn't depend on liveness.  */
      if (j < nl_first_reload && !TEST_BIT (ri->live, web->id))
	continue;

      aweb = alias (supweb);
      if (aweb->type != SPILLED)
	continue;

      start_sequence ();
      if (supweb->pattern)
	{
	  /* XXX If we later allow non-constant sources for rematerialization
	     we must also disallow coalescing _to_ rematerialized webs
	     (at least then disallow spilling them, which we already ensure
	     when flag_ra_break_aliases), or not take the pattern but a
	     stackslot.  */
	  slot = copy_rtx (supweb->pattern);
	  reg = copy_rtx (supweb->orig_x);
	  /* Sanity check.  orig_x should be a REG rtx, which should be
	     shared over all RTL, so copy_rtx should have no effect.  */
	  if (reg != supweb->orig_x)
	    abort ();
	}
      else
	{
	  allocate_spill_web (aweb);
	  slot = aweb->stack_slot;

	  /* If we don't copy the RTL there might be some SUBREG
	     rtx shared in the next iteration although being in
	     different webs, which leads to wrong code.  */
	  reg = copy_rtx (web->orig_x);
	  if (GET_CODE (reg) == SUBREG)
	    /*slot = adjust_address (slot, GET_MODE (reg), SUBREG_BYTE
	       (reg));*/
	    slot = simplify_gen_subreg (GET_MODE (reg), slot, GET_MODE (slot),
					SUBREG_BYTE (reg));
	}
      ra_emit_move_insn (reg, slot);
      ni = get_insns ();
      end_sequence ();

      before = web->last_use_insn;
      web->last_use_insn = NULL_RTX;
      if (!before)
	{
	  if (JUMP_P (last_block_insn))
	    before = last_block_insn;
	  else
	    after = last_block_insn;
	}
      if (after)
	{
	  rtx foll = NEXT_INSN (after);
	  bb = BLOCK_FOR_INSN (after);
	  emit_insn_after (ni, after);
	  if (BB_END (bb) == after)
	    BB_END (bb) = PREV_INSN (foll);
	  for (ni = NEXT_INSN (after); ni != foll; ni = NEXT_INSN (ni))
	    {
	      set_block_for_insn (ni, bb);
	      df_insn_modify (df, bb, ni);
	    }
	}
      else
	{
	  rtx prev = PREV_INSN (before);
	  bb = BLOCK_FOR_INSN (before);
	  emit_insn_before (ni, before);
	  if (BB_HEAD (bb) == before)
	    BB_HEAD (bb) = NEXT_INSN (prev);
	  for (; ni != before; ni = NEXT_INSN (ni))
	    {
	      set_block_for_insn (ni, bb);
	      df_insn_modify (df, bb, ni);
	    }
	}
      if (supweb->pattern)
	{
	  emitted_remat++;
	  spill_remat_cost += bb->frequency + 1;
	}
      else
	{
	  emitted_spill_loads++;
	  spill_load_cost += bb->frequency + 1;
	}
      RESET_BIT (ri->live, web->id);
      /* In the special case documented above only emit the reloads and
	 the loads for spill temporaries.  */
      if (ri->need_load == 2 && j < nl_first_reload)
	break;
    }
}
/* Given a set of reloads in RI, an array of NUM_REFS references (either
   uses or defs) in REFS, and REF2WEB to translate ref IDs to webs
   (either use2web or def2web) convert some reloads to loads.
   This looks at the webs referenced, and how they change the set of
   available colors.  Now put all still live webs, which needed reloads,
   and whose colors isn't free anymore, on the needed_loads list.  */

static void
reloads_to_loads (struct rewrite_info *ri, struct ref **refs,
		  unsigned int num_refs, struct web **ref2web)
{
  unsigned int n;
  int j;
  int num_reloads = ri->num_reloads;
  for (n = 0; n < num_refs && num_reloads; n++)
    {
      struct web *web = ref2web[DF_REF_ID (refs[n])];
      struct web *supweb = find_web_for_subweb (web);
      int is_death;

      /* Only emit reloads when entering their interference
	 region.  A use of a spilled web never opens an
	 interference region, independent of it's color.  */
      if (alias (supweb)->type == SPILLED)
	continue;
      if (supweb->type == PRECOLORED
	  && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
	continue;

      /* Note, that if web (and supweb) are DEFs, we already cleared
	 the corresponding bits in live.  I.e. is_death becomes true, which
	 is what we want.  */
      is_death = !TEST_BIT (ri->live, supweb->id);
      is_death &= !TEST_BIT (ri->live, web->id);
      if (is_death)
	{
	  int old_num_r = num_reloads;
	  bitmap_clear (ri->scratch);
	  EXECUTE_IF_SET_IN_BITMAP (ri->need_reload, 0, j,
	    {
	      struct web *web2 = ID2WEB (j);
	      struct web *aweb2 = alias (find_web_for_subweb (web2));
	      if (spill_is_free (&(ri->colors_in_use), aweb2) == 0)
		{
		  if (spill_same_color_p (supweb, aweb2)
		      /* && interfere (web, web2) */)
		    {
		      ri->needed_loads[ri->nl_size++] = web2;
		      bitmap_set_bit (ri->scratch, j);
		      num_reloads--;
		    }
		}
	    });
	  if (num_reloads != old_num_r)
	    bitmap_operation (ri->need_reload, ri->need_reload, ri->scratch,
			      BITMAP_AND_COMPL);
	}
    }
  ri->num_reloads = num_reloads;
}
/* This adds loads for spilled webs to the program.  It uses a kind of
   interference region spilling.  If flag_ra_ir_spilling is zero it
   only uses improved chaitin spilling (adding loads only at insns
   containing deaths).  */

static void
rewrite_program2 (bitmap new_deaths)
{
  basic_block bb = NULL;
  int nl_first_reload;
  struct rewrite_info ri;
  rtx insn;
  ri.needed_loads = xmalloc (num_webs * sizeof (struct web *));
  ri.need_reload = BITMAP_XMALLOC ();
  ri.scratch = BITMAP_XMALLOC ();
  ri.live = sbitmap_alloc (num_webs);
  ri.nl_size = 0;
  ri.num_reloads = 0;
  for (insn = get_last_insn (); insn; insn = PREV_INSN (insn))
    {
      basic_block last_bb = NULL;
      rtx last_block_insn;
      int i, j;

      if (!INSN_P (insn))
	insn = prev_real_insn (insn);
      while (insn && !(bb = BLOCK_FOR_INSN (insn)))
	insn = prev_real_insn (insn);
      if (!insn)
	break;
      i = bb->index + 2;
      last_block_insn = insn;

      sbitmap_zero (ri.live);
      CLEAR_HARD_REG_SET (ri.colors_in_use);
      EXECUTE_IF_SET_IN_BITMAP (live_at_end[i - 2], 0, j,
	{
	  struct web *web = use2web[j];
	  struct web *aweb = alias (find_web_for_subweb (web));
	  /* A web is only live at end, if it isn't spilled.  If we wouldn't
	     check this, the last uses of spilled web per basic block
	     wouldn't be detected as deaths, although they are in the final
	     code.  This would lead to cumulating many loads without need,
	     only increasing register pressure.  */
	  /* XXX do add also spilled webs which got a color for IR spilling.
	     Remember to not add to colors_in_use in that case.  */
	  if (aweb->type != SPILLED /*|| aweb->color >= 0*/)
	    {
	      SET_BIT (ri.live, web->id);
	      if (aweb->type != SPILLED)
		update_spill_colors (&(ri.colors_in_use), web, 1);
	    }
	});

      bitmap_clear (ri.need_reload);
      ri.num_reloads = 0;
      ri.any_spilltemps_spilled = 0;
      if (flag_ra_ir_spilling)
	{
	  struct dlist *d;
	  int pass;
	  /* XXX If we don't add spilled nodes into live above, the following
	     becomes an empty loop.  */
	  for (pass = 0; pass < 2; pass++)
	    for (d = (pass) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
	      {
		struct web *web = DLIST_WEB (d);
		struct web *aweb = alias (web);
		if (aweb->type != SPILLED)
		  continue;
		if (is_partly_live (ri.live, web)
		    && spill_is_free (&(ri.colors_in_use), web) > 0)
		  {
		    ri.num_reloads++;
		    bitmap_set_bit (ri.need_reload, web->id);
		    /* Last using insn is somewhere in another block.  */
		    web->last_use_insn = NULL_RTX;
		  }
	      }
	}

      last_bb = bb;
      for (; insn; insn = PREV_INSN (insn))
	{
	  struct ra_insn_info info;
	  unsigned int n;

	  if (INSN_P (insn) && BLOCK_FOR_INSN (insn) != last_bb)
	    {
	      int index = BLOCK_FOR_INSN (insn)->index + 2;
	      EXECUTE_IF_SET_IN_BITMAP (live_at_end[index - 2], 0, j,
		{
		  struct web *web = use2web[j];
		  struct web *aweb = alias (find_web_for_subweb (web));
		  if (aweb->type != SPILLED)
		    {
		      SET_BIT (ri.live, web->id);
		      update_spill_colors (&(ri.colors_in_use), web, 1);
		    }
		});
	      bitmap_clear (ri.scratch);
	      EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
		{
		  struct web *web2 = ID2WEB (j);
		  struct web *supweb2 = find_web_for_subweb (web2);
		  struct web *aweb2 = alias (supweb2);
		  if (spill_is_free (&(ri.colors_in_use), aweb2) <= 0)
		    {
		      ri.needed_loads[ri.nl_size++] = web2;
		      bitmap_set_bit (ri.scratch, j);
		      ri.num_reloads--;
		    }
		});
	      bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
				BITMAP_AND_COMPL);
	      last_bb = BLOCK_FOR_INSN (insn);
	      last_block_insn = insn;
	      if (!INSN_P (last_block_insn))
		last_block_insn = prev_real_insn (last_block_insn);
	    }

	  ri.need_load = 0;
	  if (INSN_P (insn))
	    info = insn_df[INSN_UID (insn)];

	  if (INSN_P (insn))
	    for (n = 0; n < info.num_defs; n++)
	      {
		struct ref *ref = info.defs[n];
		struct web *web = def2web[DF_REF_ID (ref)];
		struct web *supweb = find_web_for_subweb (web);
		int is_non_def = 0;
		unsigned int n2;

		supweb = find_web_for_subweb (web);
		/* Webs which are defined here, but also used in the same insn
		   are rmw webs, or this use isn't a death because of looping
		   constructs.  In neither case makes this def available it's
		   resources.  Reloads for it are still needed, it's still
		   live and it's colors don't become free.  */
		for (n2 = 0; n2 < info.num_uses; n2++)
		  {
		    struct web *web2 = use2web[DF_REF_ID (info.uses[n2])];
		    if (supweb == find_web_for_subweb (web2))
		      {
			is_non_def = 1;
			break;
		      }
		  }
		if (is_non_def)
		  continue;

		if (!is_partly_live (ri.live, supweb))
		  bitmap_set_bit (useless_defs, DF_REF_ID (ref));

		RESET_BIT (ri.live, web->id);
		if (bitmap_bit_p (ri.need_reload, web->id))
		  {
		    ri.num_reloads--;
		    bitmap_clear_bit (ri.need_reload, web->id);
		  }
		if (web != supweb)
		  {
		    /* XXX subwebs aren't precisely tracked here.  We have
		       everything we need (inverse webs), but the code isn't
		       yet written.  We need to make all completely
		       overlapping web parts non-live here.  */
		    /* If by luck now the whole web isn't live anymore, no
		       reloads for it are needed.  */
		    if (!is_partly_live (ri.live, supweb)
			&& bitmap_bit_p (ri.need_reload, supweb->id))
		      {
			ri.num_reloads--;
			bitmap_clear_bit (ri.need_reload, supweb->id);
		      }
		  }
		else
		  {
		    struct web *sweb;
		    /* If the whole web is defined here, no parts of it are
		       live anymore and no reloads are needed for them.  */
		    for (sweb = supweb->subreg_next; sweb;
			 sweb = sweb->subreg_next)
		      {
			RESET_BIT (ri.live, sweb->id);
			if (bitmap_bit_p (ri.need_reload, sweb->id))
			  {
			    ri.num_reloads--;
			    bitmap_clear_bit (ri.need_reload, sweb->id);
			  }
		      }
		  }
		if (alias (supweb)->type != SPILLED)
		  update_spill_colors (&(ri.colors_in_use), web, 0);
	      }

	  nl_first_reload = ri.nl_size;

	  /* CALL_INSNs are not really deaths, but still more registers
	     are free after a call, than before.
	     XXX Note, that sometimes reload barfs when we emit insns between
	     a call and the insn which copies the return register into a
	     pseudo.  */
	  if (GET_CODE (insn) == CALL_INSN)
	    ri.need_load = 1;
	  else if (INSN_P (insn))
	    for (n = 0; n < info.num_uses; n++)
	      {
		struct web *web = use2web[DF_REF_ID (info.uses[n])];
		struct web *supweb = find_web_for_subweb (web);
		int is_death;
		if (supweb->type == PRECOLORED
		    && TEST_HARD_REG_BIT (never_use_colors, supweb->color))
		  continue;
		is_death = !TEST_BIT (ri.live, supweb->id);
		is_death &= !TEST_BIT (ri.live, web->id);
		if (is_death)
		  {
		    ri.need_load = 1;
		    bitmap_set_bit (new_deaths, INSN_UID (insn));
		    break;
		  }
	      }

	  if (INSN_P (insn) && ri.num_reloads)
	    {
	      int old_num_reloads = ri.num_reloads;
	      reloads_to_loads (&ri, info.uses, info.num_uses, use2web);

	      /* If this insn sets a pseudo, which isn't used later
		 (i.e. wasn't live before) it is a dead store.  We need
		 to emit all reloads which have the same color as this def.
		 We don't need to check for non-liveness here to detect
		 the deadness (it anyway is too late, as we already cleared
		 the liveness in the first loop over the defs), because if it
		 _would_ be live here, no reload could have that color, as
		 they would already have been converted to a load.  */
	      reloads_to_loads (&ri, info.defs, info.num_defs, def2web);
	      if (ri.num_reloads != old_num_reloads && !ri.need_load)
		ri.need_load = 1;
	    }

	  if (ri.nl_size && (ri.need_load || ri.any_spilltemps_spilled))
	    emit_loads (&ri, nl_first_reload, last_block_insn);

	  if (INSN_P (insn) && flag_ra_ir_spilling)
	    for (n = 0; n < info.num_uses; n++)
	      {
		struct web *web = use2web[DF_REF_ID (info.uses[n])];
		struct web *aweb = alias (find_web_for_subweb (web));
		if (aweb->type != SPILLED)
		  update_spill_colors (&(ri.colors_in_use), web, 1);
	      }

	  ri.any_spilltemps_spilled = 0;
	  if (INSN_P (insn))
	    for (n = 0; n < info.num_uses; n++)
	      {
		struct web *web = use2web[DF_REF_ID (info.uses[n])];
		struct web *supweb = find_web_for_subweb (web);
		struct web *aweb = alias (supweb);
		SET_BIT (ri.live, web->id);
		if (aweb->type != SPILLED)
		  continue;
		if (supweb->spill_temp)
		  ri.any_spilltemps_spilled = 1;
		web->last_use_insn = insn;
		if (spill_is_free (&(ri.colors_in_use), aweb) <= 0
		    || !flag_ra_ir_spilling)
		  ri.needed_loads[ri.nl_size++] = web;
		else if (!bitmap_bit_p (ri.need_reload, web->id))
		  {
		    bitmap_set_bit (ri.need_reload, web->id);
		    ri.num_reloads++;
		  }
	      }

	  if (GET_CODE (insn) == CODE_LABEL)
	    break;
	}

      nl_first_reload = ri.nl_size;
      if (ri.num_reloads)
	{
	  int in_ir = 0;
	  edge e;
	  int num = 0;
	  HARD_REG_SET cum_colors, colors;
	  CLEAR_HARD_REG_SET (cum_colors);
	  for (e = bb->pred; e && num < 5; e = e->pred_next, num++)
	    {
	      CLEAR_HARD_REG_SET (colors);
	      EXECUTE_IF_SET_IN_BITMAP (live_at_end[e->src->index], 0, j,
		{
		  struct web *web = use2web[j];
		  struct web *aweb = alias (find_web_for_subweb (web));
		  if (aweb->type != SPILLED)
		    update_spill_colors (&colors, web, 1);
		});
	      IOR_HARD_REG_SET (cum_colors, colors);
	    }
	  if (num == 5)
	    in_ir = 1;

	  bitmap_clear (ri.scratch);
	  EXECUTE_IF_SET_IN_BITMAP (ri.need_reload, 0, j,
	    {
	      struct web *web2 = ID2WEB (j);
	      struct web *supweb2 = find_web_for_subweb (web2);
	      struct web *aweb2 = alias (supweb2);
	      /* block entry is IR boundary for aweb2?
		 Currently more some tries for good conditions.  */
	      if (((ra_pass > 0 || supweb2->target_of_spilled_move)
		   && (1 || in_ir
		       || spill_is_free (&cum_colors, aweb2) <= 0))
		  || (in_ir
		      || spill_is_free (&cum_colors, aweb2) <= 0))
		{
		  ri.needed_loads[ri.nl_size++] = web2;
		  bitmap_set_bit (ri.scratch, j);
		  ri.num_reloads--;
		}
	    });
	  bitmap_operation (ri.need_reload, ri.need_reload, ri.scratch,
			    BITMAP_AND_COMPL);
	}

      ri.need_load = 1;
      emit_loads (&ri, nl_first_reload, last_block_insn);
      if (ri.nl_size != 0 /*|| ri.num_reloads != 0*/)
	abort ();
    }
  free (ri.needed_loads);
  sbitmap_free (ri.live);
  BITMAP_XFREE (ri.scratch);
  BITMAP_XFREE (ri.need_reload);
}
/* WEB is a web conflicting with a spilled one.  Prepare it
   to be able to rescan it in the next pass.  Mark all its uses
   for checking, and clear some members of their web parts
   (of defs and uses).  Notably don't clear the uplink.  We don't
   change the layout of this web, just its conflicts.
   Also remember all IDs of its uses in USES_AS_BITMAP.  */

static void
mark_refs_for_checking (struct web *web, bitmap uses_as_bitmap)
{
  unsigned int i;
  for (i = 0; i < web->num_uses; i++)
    {
      unsigned int id = DF_REF_ID (web->uses[i]);
      SET_BIT (last_check_uses, id);
      bitmap_set_bit (uses_as_bitmap, id);
      web_parts[df->def_id + id].spanned_deaths = 0;
      web_parts[df->def_id + id].crosses_call = 0;
    }
  for (i = 0; i < web->num_defs; i++)
    {
      unsigned int id = DF_REF_ID (web->defs[i]);
      web_parts[id].spanned_deaths = 0;
      web_parts[id].crosses_call = 0;
    }
}
/* The last step of the spill phase is to set up the structures for
   incrementally rebuilding the interference graph.  We break up
   the web part structure of all spilled webs, mark their uses for
   rechecking, look at their neighbors, and clean up some global
   information, we will rebuild.  */

static void
detect_web_parts_to_rebuild (void)
{
  bitmap uses_as_bitmap;
  unsigned int i, pass;
  struct dlist *d;
  sbitmap already_webs = sbitmap_alloc (num_webs);

  uses_as_bitmap = BITMAP_XMALLOC ();
  if (last_check_uses)
    sbitmap_free (last_check_uses);
  last_check_uses = sbitmap_alloc (df->use_id);
  sbitmap_zero (last_check_uses);
  sbitmap_zero (already_webs);
  /* We need to recheck all uses of all webs involved in spilling (and the
     uses added by spill insns, but those are not analyzed yet).
     Those are the spilled webs themselves, webs coalesced to spilled ones,
     and webs conflicting with any of them.  */
  for (pass = 0; pass < 2; pass++)
    for (d = (pass == 0) ? WEBS(SPILLED) : WEBS(COALESCED); d; d = d->next)
      {
	struct web *web = DLIST_WEB (d);
	struct conflict_link *wl;
	unsigned int j;
	/* This check is only needed for coalesced nodes, but hey.  */
	if (alias (web)->type != SPILLED)
	  continue;

	/* For the spilled web itself we also need to clear it's
	   uplink, to be able to rebuild smaller webs.  After all
	   spilling has split the web.  */
	for (i = 0; i < web->num_uses; i++)
	  {
	    unsigned int id = DF_REF_ID (web->uses[i]);
	    SET_BIT (last_check_uses, id);
	    bitmap_set_bit (uses_as_bitmap, id);
	    web_parts[df->def_id + id].uplink = NULL;
	    web_parts[df->def_id + id].spanned_deaths = 0;
	    web_parts[df->def_id + id].crosses_call = 0;
	  }
	for (i = 0; i < web->num_defs; i++)
	  {
	    unsigned int id = DF_REF_ID (web->defs[i]);
	    web_parts[id].uplink = NULL;
	    web_parts[id].spanned_deaths = 0;
	    web_parts[id].crosses_call = 0;
	  }

	/* Now look at all neighbors of this spilled web.  */
	if (web->have_orig_conflicts)
	  wl = web->orig_conflict_list;
	else
	  wl = web->conflict_list;
	for (; wl; wl = wl->next)
	  {
	    if (TEST_BIT (already_webs, wl->t->id))
	      continue;
	    SET_BIT (already_webs, wl->t->id);
	    mark_refs_for_checking (wl->t, uses_as_bitmap);
	  }
	EXECUTE_IF_SET_IN_BITMAP (web->useless_conflicts, 0, j,
	  {
	    struct web *web2 = ID2WEB (j);
	    if (TEST_BIT (already_webs, web2->id))
	      continue;
	    SET_BIT (already_webs, web2->id);
	    mark_refs_for_checking (web2, uses_as_bitmap);
	  });
      }

  /* We also recheck unconditionally all uses of any hardregs.  This means
     we _can_ delete all these uses from the live_at_end[] bitmaps.
     And because we sometimes delete insn referring to hardregs (when
     they became useless because they setup a rematerializable pseudo, which
     then was rematerialized), some of those uses will go away with the next
     df_analyse().  This means we even _must_ delete those uses from
     the live_at_end[] bitmaps.  For simplicity we simply delete
     all of them.  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      struct df_link *link;
      for (link = df->regs[i].uses; link; link = link->next)
	if (link->ref)
	  bitmap_set_bit (uses_as_bitmap, DF_REF_ID (link->ref));
    }

  /* The information in live_at_end[] will be rebuild for all uses
     we recheck, so clear it here (the uses of spilled webs, might
     indeed not become member of it again).  */
  live_at_end -= 2;
  for (i = 0; i < (unsigned int) last_basic_block + 2; i++)
    bitmap_operation (live_at_end[i], live_at_end[i], uses_as_bitmap,
		      BITMAP_AND_COMPL);
  live_at_end += 2;

  if (rtl_dump_file && (debug_new_regalloc & DUMP_REBUILD) != 0)
    {
      ra_debug_msg (DUMP_REBUILD, "need to check these uses:\n");
      dump_sbitmap_file (rtl_dump_file, last_check_uses);
    }
  sbitmap_free (already_webs);
  BITMAP_XFREE (uses_as_bitmap);
}
/* Statistics about deleted insns, which are useless now.  */
static unsigned int deleted_def_insns;
static unsigned HOST_WIDE_INT deleted_def_cost;

/* In rewrite_program2() we noticed, when a certain insn set a pseudo
   which wasn't live.  Try to delete all those insns.  */

static void
delete_useless_defs (void)
{
  unsigned int i;
  /* If the insn only sets the def without any sideeffect (besides
     clobbers or uses), we can delete it.  single_set() also tests
     for INSN_P(insn).  */
  EXECUTE_IF_SET_IN_BITMAP (useless_defs, 0, i,
    {
      rtx insn = DF_REF_INSN (df->defs[i]);
      rtx set = single_set (insn);
      struct web *web = find_web_for_subweb (def2web[i]);
      if (set && web->type == SPILLED && web->stack_slot == NULL)
	{
	  deleted_def_insns++;
	  deleted_def_cost += BLOCK_FOR_INSN (insn)->frequency + 1;
	  PUT_CODE (insn, NOTE);
	  NOTE_LINE_NUMBER (insn) = NOTE_INSN_DELETED;
	  df_insn_modify (df, BLOCK_FOR_INSN (insn), insn);
	}
    });
}
/* Look for spilled webs, on whose behalf no insns were emitted.
   We inversify (sp?) the changed flag of the webs, so after this function
   a nonzero changed flag means, that this web was not spillable (at least
   not in this pass).  */

static void
detect_non_changed_webs (void)
{
  struct dlist *d, *d_next;
  for (d = WEBS(SPILLED); d; d = d_next)
    {
      struct web *web = DLIST_WEB (d);
      d_next = d->next;
      if (!web->changed)
	{
	  ra_debug_msg (DUMP_PROCESS, "no insns emitted for spilled web %d\n",
			web->id);
	  remove_web_from_list (web);
	  put_web (web, COLORED);
	  web->changed = 1;
	}
      else
	web->changed = 0;
      /* From now on web->changed is used as the opposite flag.
	 I.e. colored webs, which have changed set were formerly
	 spilled webs for which no insns were emitted.  */
    }
}
/* Before spilling we clear the changed flags for all spilled webs.  */

static void
reset_changed_flag (void)
{
  struct dlist *d;
  for (d = WEBS(SPILLED); d; d = d->next)
    DLIST_WEB(d)->changed = 0;
}
/* The toplevel function for this file.  Given a colorized graph,
   and lists of spilled, coalesced and colored webs, we add some
   spill code.  This also sets up the structures for incrementally
   building the interference graph in the next pass.  */

void
actual_spill (void)
{
  int i;
  bitmap new_deaths = BITMAP_XMALLOC ();
  reset_changed_flag ();
  spill_coalprop ();
  choose_spill_colors ();
  useless_defs = BITMAP_XMALLOC ();
  if (flag_ra_improved_spilling)
    rewrite_program2 (new_deaths);
  else
    rewrite_program (new_deaths);
  insert_stores (new_deaths);
  delete_useless_defs ();
  BITMAP_XFREE (useless_defs);
  sbitmap_free (insns_with_deaths);
  insns_with_deaths = sbitmap_alloc (get_max_uid ());
  death_insns_max_uid = get_max_uid ();
  sbitmap_zero (insns_with_deaths);
  EXECUTE_IF_SET_IN_BITMAP (new_deaths, 0, i,
    { SET_BIT (insns_with_deaths, i);});
  detect_non_changed_webs ();
  detect_web_parts_to_rebuild ();
  BITMAP_XFREE (new_deaths);
}
/* A bitmap of pseudo reg numbers which are coalesced directly
   to a hardreg.  Set in emit_colors(), used and freed in
   remove_suspicious_death_notes().  */
static bitmap regnos_coalesced_to_hardregs;
/* Create new pseudos for each web we colored, change insns to
   use those pseudos and set up ra_reg_renumber.  */

void
emit_colors (struct df *df)
{
  unsigned int i;
  int si;
  struct web *web;
  int old_max_regno = max_reg_num ();
  regset old_regs;
  basic_block bb;

  /* This bitmap is freed in remove_suspicious_death_notes(),
     which is also the user of it.  */
  regnos_coalesced_to_hardregs = BITMAP_XMALLOC ();

  /* First create the (REG xx) rtx's for all webs, as we need to know
     the number, to make sure, flow has enough memory for them in the
     various tables.  */
  for (i = 0; i < num_webs - num_subwebs; i++)
    {
      web = ID2WEB (i);
      if (web->type != COLORED && web->type != COALESCED)
	continue;
      if (web->type == COALESCED && alias (web)->type == COLORED)
	continue;
      if (web->reg_rtx || web->regno < FIRST_PSEUDO_REGISTER)
	abort ();

      if (web->regno >= max_normal_pseudo)
	{
	  rtx place;
	  if (web->color == an_unusable_color)
	    {
	      unsigned int inherent_size = PSEUDO_REGNO_BYTES (web->regno);
	      unsigned int total_size = MAX (inherent_size, 0);
	      place = assign_stack_local (PSEUDO_REGNO_MODE (web->regno),
					  total_size,
					  inherent_size == total_size ? 0 : -1);
	      RTX_UNCHANGING_P (place) =
		RTX_UNCHANGING_P (regno_reg_rtx[web->regno]);
	      set_mem_alias_set (place, new_alias_set ());
	    }
	  else
	    place = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
	  web->reg_rtx = place;
	}
      else
	{
	  /* Special case for i386 'fix_truncdi_nomemory' insn.
	     We must choose mode from insns not from PSEUDO_REGNO_MODE.
	     Actual only for clobbered register.  */
	  if (web->num_uses == 0 && web->num_defs == 1)
	    web->reg_rtx = gen_reg_rtx (GET_MODE (DF_REF_REAL_REG (web->defs[0])));
	  else
	    web->reg_rtx = gen_reg_rtx (PSEUDO_REGNO_MODE (web->regno));
	  /* Remember the different parts directly coalesced to a hardreg.  */
	  if (web->type == COALESCED)
	    bitmap_set_bit (regnos_coalesced_to_hardregs, REGNO (web->reg_rtx));
	}
    }
  ra_max_regno = max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, FALSE);
  ra_reg_renumber = xmalloc (max_regno * sizeof (short));
  for (si = 0; si < max_regno; si++)
    ra_reg_renumber[si] = -1;

  /* Then go through all references, and replace them by a new
     pseudoreg for each web.  All uses.  */
  /* XXX
     Beware: The order of replacements (first uses, then defs) matters only
     for read-mod-write insns, where the RTL expression for the REG is
     shared between def and use.  For normal rmw insns we connected all such
     webs, i.e. both the use and the def (which are the same memory)
     there get the same new pseudo-reg, so order would not matter.
     _However_ we did not connect webs, were the read cycle was an
     uninitialized read.  If we now would first replace the def reference
     and then the use ref, we would initialize it with a REG rtx, which
     gets never initialized, and yet more wrong, which would overwrite
     the definition of the other REG rtx.  So we must replace the defs last.
   */
  for (i = 0; i < df->use_id; i++)
    if (df->uses[i])
      {
	regset rs = DF_REF_BB (df->uses[i])->global_live_at_start;
	rtx regrtx;
	web = use2web[i];
	web = find_web_for_subweb (web);
	if (web->type != COLORED && web->type != COALESCED)
	  continue;
	regrtx = alias (web)->reg_rtx;
	if (!regrtx)
	  regrtx = web->reg_rtx;
	*DF_REF_REAL_LOC (df->uses[i]) = regrtx;
	if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
	  {
	    /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
	    SET_REGNO_REG_SET (rs, REGNO (regrtx));
	  }
      }

  /* And now the same for all defs.  */
  for (i = 0; i < df->def_id; i++)
    if (df->defs[i])
      {
	regset rs;
	rtx regrtx;
	rs = DF_REF_BB (df->defs[i])->global_live_at_start;
	web = def2web[i];
	web = find_web_for_subweb (web);
	if (web->type != COLORED && web->type != COALESCED)
	  continue;
	regrtx = alias (web)->reg_rtx;
	if (!regrtx)
	  regrtx = web->reg_rtx;
	*DF_REF_REAL_LOC (df->defs[i]) = regrtx;
	if (REGNO_REG_SET_P (rs, web->regno) && REG_P (regrtx))
	  {
	    /* Don't simply clear the current regno, as it might be
	       replaced by two webs.  */
	    /*CLEAR_REGNO_REG_SET (rs, web->regno);*/
	    SET_REGNO_REG_SET (rs, REGNO (regrtx));
	  }
      }

  /* And now set up the ra_reg_renumber array for reload with all the new
     pseudo-regs.  */
  for (i = 0; i < num_webs - num_subwebs; i++)
    {
      web = ID2WEB (i);
      if (web->reg_rtx && REG_P (web->reg_rtx))
	{
	  int r = REGNO (web->reg_rtx);
	  ra_reg_renumber[r] = web->color;
	  ra_debug_msg (DUMP_COLORIZE, "Renumber pseudo %d (== web %d) to %d\n",
			r, web->id, ra_reg_renumber[r]);
	}
    }

  old_regs = BITMAP_XMALLOC ();
  for (si = FIRST_PSEUDO_REGISTER; si < old_max_regno; si++)
    SET_REGNO_REG_SET (old_regs, si);
  FOR_EACH_BB (bb)
    {
      AND_COMPL_REG_SET (bb->global_live_at_start, old_regs);
      AND_COMPL_REG_SET (bb->global_live_at_end, old_regs);
    }
  BITMAP_XFREE (old_regs);
}
/* Delete some coalesced moves from the insn stream.  */

void
delete_moves (void)
{
  struct move_list *ml;
  struct web *s, *t;
  /* XXX Beware: We normally would test here each copy insn, if
     source and target got the same color (either by coalescing or by pure
     luck), and then delete it.
     This will currently not work.  One problem is, that we don't color
     the regs ourself, but instead defer to reload.  So the colorization
     is only a kind of suggestion, which reload doesn't have to follow.
     For webs which are coalesced to a normal colored web, we only have one
     new pseudo, so in this case we indeed can delete copy insns involving
     those (because even if reload colors them different from our suggestion,
     it still has to color them the same, as only one pseudo exists).  But for
     webs coalesced to precolored ones, we have not a single pseudo, but
     instead one for each coalesced web.  This means, that we can't delete
     copy insns, where source and target are webs coalesced to precolored
     ones, because then the connection between both webs is destroyed.  Note
     that this not only means copy insns, where one side is the precolored one
     itself, but also those between webs which are coalesced to one color.
     Also because reload we can't delete copy insns which involve any
     precolored web at all.  These often have also special meaning (e.g.
     copying a return value of a call to a pseudo, or copying pseudo to the
     return register), and the deletion would confuse reload in thinking the
     pseudo isn't needed.  One of those days reload will get away and we can
     do everything we want.
     In effect because of the later reload, we can't base our deletion on the
     colors itself, but instead need to base them on the newly created
     pseudos.  */
  for (ml = wl_moves; ml; ml = ml->next)
    /* The real condition we would ideally use is: s->color == t->color.
       Additionally: s->type != PRECOLORED && t->type != PRECOLORED, in case
       we want to prevent deletion of "special" copies.  */
    if (ml->move
	&& (s = alias (ml->move->source_web))->reg_rtx
	   == (t = alias (ml->move->target_web))->reg_rtx
	&& s->type != PRECOLORED && t->type != PRECOLORED)
      {
	basic_block bb = BLOCK_FOR_INSN (ml->move->insn);
	df_insn_delete (df, bb, ml->move->insn);
	deleted_move_insns++;
	deleted_move_cost += bb->frequency + 1;
      }
}
/* Due to reasons documented elsewhere we create different pseudos
   for all webs coalesced to hardregs.  For these parts life_analysis()
   might have added REG_DEAD notes without considering, that only this part
   but not the whole coalesced web dies.  The RTL is correct, there is no
   coalescing yet.  But if later reload's alter_reg() substitutes the
   hardreg into the REG rtx it looks like that particular hardreg dies here,
   although (due to coalescing) it still is live.  This might make different
   places of reload think, it can use that hardreg for reload regs,
   accidentally overwriting it.  So we need to remove those REG_DEAD notes.
   (Or better teach life_analysis() and reload about our coalescing, but
   that comes later) Bah.  */

void
remove_suspicious_death_notes (void)
{
  rtx insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	rtx *pnote = &REG_NOTES (insn);
	while (*pnote)
	  {
	    rtx note = *pnote;
	    if ((REG_NOTE_KIND (note) == REG_DEAD
		 || REG_NOTE_KIND (note) == REG_UNUSED)
		&& (GET_CODE (XEXP (note, 0)) == REG
		    && bitmap_bit_p (regnos_coalesced_to_hardregs,
				     REGNO (XEXP (note, 0)))))
	      *pnote = XEXP (note, 1);
	    else
	      pnote = &XEXP (*pnote, 1);
	  }
      }
  BITMAP_XFREE (regnos_coalesced_to_hardregs);
  regnos_coalesced_to_hardregs = NULL;
}
/* Allocate space for max_reg_num() pseudo registers, and
   fill reg_renumber[] from ra_reg_renumber[].  If FREE_IT
   is nonzero, also free ra_reg_renumber and reset ra_max_regno.  */

void
setup_renumber (int free_it)
{
  int i;
  max_regno = max_reg_num ();
  allocate_reg_info (max_regno, FALSE, TRUE);
  for (i = 0; i < max_regno; i++)
    reg_renumber[i] = (i < ra_max_regno) ? ra_reg_renumber[i] : -1;
  if (free_it)
    {
      free (ra_reg_renumber);
      ra_reg_renumber = NULL;
      ra_max_regno = 0;
    }
}
/* Dump the costs and savings due to spilling, i.e. of added spill insns
   and removed moves or useless defs.  */

void
dump_cost (unsigned int level)
{
  ra_debug_msg (level, "Instructions for spilling\n added:\n");
  ra_debug_msg (level, "  loads =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
		emitted_spill_loads, spill_load_cost);
  ra_debug_msg (level, "  stores=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
		emitted_spill_stores, spill_store_cost);
  ra_debug_msg (level, "  remat =%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
		emitted_remat, spill_remat_cost);
  ra_debug_msg (level, " removed:\n  moves =%d cost="
		HOST_WIDE_INT_PRINT_UNSIGNED "\n",
		deleted_move_insns, deleted_move_cost);
  ra_debug_msg (level, "  others=%d cost=" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
		deleted_def_insns, deleted_def_cost);
}
/* Initialization of the rewrite phase.  */

void
ra_rewrite_init (void)
{
  emitted_spill_loads = 0;
  emitted_spill_stores = 0;
  emitted_remat = 0;
  spill_load_cost = 0;
  spill_store_cost = 0;
  spill_remat_cost = 0;
  deleted_move_insns = 0;
  deleted_move_cost = 0;
  deleted_def_insns = 0;
  deleted_def_cost = 0;
}

/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4
*/