/* Graph coloring register allocator
   Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
   Contributed by Michael Matz <matz@suse.de>
   and Daniel Berlin <dan@cgsoftware.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under the
   terms of the GNU General Public License as published by the Free Software
   Foundation; either version 2, or (at your option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
   details.

   You should have received a copy of the GNU General Public License along
   with GCC; see the file COPYING.  If not, write to the Free Software
   Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#include "coretypes.h"
#include "insn-config.h"
#include "hard-reg-set.h"
#include "basic-block.h"
/* This file is part of the graph coloring register allocator.
   It deals with building the interference graph.  When rebuilding
   the graph for a function after spilling, we rebuild only those
   parts needed, i.e. it works incrementally.

   The first part (the functions called from build_web_parts_and_conflicts())
   constructs a web_part for each pseudo register reference in the insn
   stream, then goes backward from each use, until it reaches defs for that
   pseudo.  While going back it remembers seen defs for other registers as
   conflicts.  By connecting the uses and defs, which reach each other, webs
   (or live ranges) are built conceptually.

   The second part (make_webs() and children) deals with converting that
   structure to the nodes and edges, on which our interference graph is
   built.  For each root web part constructed above, an instance of struct
   web is created.  For all subregs of pseudos, which matter for allocation,
   a subweb of the corresponding super web is built.  Finally all the
   conflicts noted in the first part (as bitmaps) are transformed into real
   edges.

   As part of that process the webs are also classified (their spill cost
   is calculated, and if they are spillable at all, and if not, for what
   reason; or if they are rematerializable), and move insns are collected,
   which are potentially coalescable.

   The top-level entry of this file (build_i_graph) puts it all together,
   and leaves us with a complete interference graph, which just has
   to be colored.  */
static unsigned HOST_WIDE_INT rtx_to_undefined PARAMS ((rtx));
static bitmap find_sub_conflicts PARAMS ((struct web_part *, unsigned int));
static bitmap get_sub_conflicts PARAMS ((struct web_part *, unsigned int));
static unsigned int undef_to_size_word PARAMS ((rtx, unsigned HOST_WIDE_INT *));
static bitmap undef_to_bitmap PARAMS ((struct web_part *,
				       unsigned HOST_WIDE_INT *));
static struct web_part * find_web_part_1 PARAMS ((struct web_part *));
static struct web_part * union_web_part_roots
				PARAMS ((struct web_part *, struct web_part *));
static int defuse_overlap_p_1 PARAMS ((rtx, struct curr_use *));
static int live_out_1 PARAMS ((struct df *, struct curr_use *, rtx));
static int live_out PARAMS ((struct df *, struct curr_use *, rtx));
static rtx live_in_edge PARAMS ((struct df *, struct curr_use *, edge));
static void live_in PARAMS ((struct df *, struct curr_use *, rtx));
static int copy_insn_p PARAMS ((rtx, rtx *, rtx *));
static void remember_move PARAMS ((rtx));
static void handle_asm_insn PARAMS ((struct df *, rtx));
static void prune_hardregs_for_mode PARAMS ((HARD_REG_SET *,
					     enum machine_mode));
static void init_one_web_common PARAMS ((struct web *, rtx));
static void init_one_web PARAMS ((struct web *, rtx));
static void reinit_one_web PARAMS ((struct web *, rtx));
static struct web * add_subweb PARAMS ((struct web *, rtx));
static struct web * add_subweb_2 PARAMS ((struct web *, unsigned int));
static void init_web_parts PARAMS ((struct df *));
static void copy_conflict_list PARAMS ((struct web *));
static void add_conflict_edge PARAMS ((struct web *, struct web *));
static void build_inverse_webs PARAMS ((struct web *));
static void copy_web PARAMS ((struct web *, struct web_link **));
static void compare_and_free_webs PARAMS ((struct web_link **));
static void init_webs_defs_uses PARAMS ((void));
static unsigned int parts_to_webs_1 PARAMS ((struct df *, struct web_link **,
					     struct df_link *));
static void parts_to_webs PARAMS ((struct df *));
static void reset_conflicts PARAMS ((void));
static void check_conflict_numbers PARAMS ((void));
static void conflicts_between_webs PARAMS ((struct df *));
static void remember_web_was_spilled PARAMS ((struct web *));
static void detect_spill_temps PARAMS ((void));
static int contains_pseudo PARAMS ((rtx));
static int want_to_remat PARAMS ((rtx x));
static void detect_remat_webs PARAMS ((void));
static void determine_web_costs PARAMS ((void));
static void detect_webs_set_in_cond_jump PARAMS ((void));
static void make_webs PARAMS ((struct df *));
static void moves_to_webs PARAMS ((struct df *));
static void connect_rmw_web_parts PARAMS ((struct df *));
static void update_regnos_mentioned PARAMS ((void));
static void livethrough_conflicts_bb PARAMS ((basic_block));
static void init_bb_info PARAMS ((void));
static void free_bb_info PARAMS ((void));
static void build_web_parts_and_conflicts PARAMS ((struct df *));
/* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
   edge.  */
static sbitmap live_over_abnormal;
/* To cache if we already saw a certain edge while analyzing one
   use, we use a tick count incremented per use.  */
static unsigned int visited_pass;
/* A sbitmap of UIDs of move insns, which we already analyzed.  */
static sbitmap move_handled;
/* One such structure is allocated per insn, and traces for the currently
   analyzed use, which web part belongs to it, and how many bytes of
   it were still undefined when that insn was reached.  */
struct visit_trace
{
  struct web_part *wp;
  unsigned HOST_WIDE_INT undefined;
};

/* Indexed by UID.  */
static struct visit_trace *visit_trace;
/* Per basic block we have one such structure, used to speed up
   the backtracing of uses.  */
struct ra_bb_info
{
  /* The value of visited_pass, as the first insn of this BB was reached
     the last time.  If this equals the current visited_pass, then
     undefined is valid.  Otherwise not.  */
  unsigned int pass;
  /* The still undefined bytes at that time.  The use to which this is
     relative is the current use.  */
  unsigned HOST_WIDE_INT undefined;
  /* Bit regno is set, if that regno is mentioned in this BB as a def, or
     the source of a copy insn.  In these cases we can not skip the whole
     block if we reach it from the end.  */
  bitmap regnos_mentioned;
  /* If a use reaches the end of a BB, and that BB doesn't mention its
     regno, we skip the block, and remember the ID of that use
     as living throughout the whole block.  */
  bitmap live_throughout;
  /* The content of the aux field before placing a pointer to this
     structure.  */
  void *old_aux;
};
/* We need a fast way to describe a certain part of a register.
   Therefore we put together the size and offset (in bytes) in one
   integer.  */
#define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF))
#define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF)
#define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF)
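
/* As an example of the encoding: BL_TO_WORD (2, 4) yields 0x00020004,
   i.e. byte offset 2 in the upper halfword and length 4 in the lower one,
   so BYTE_BEGIN (0x00020004) == 2 and BYTE_LENGTH (0x00020004) == 4.
   This obviously limits offsets and lengths to 16 bits each, which is
   plenty for describing parts of registers byte-wise.  */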
/* For a REG or SUBREG expression X return the size/offset pair
   as an integer.  */

unsigned int
rtx_to_bits (x)
     rtx x;
{
  unsigned int len, beg;
  len = GET_MODE_SIZE (GET_MODE (x));
  beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
  return BL_TO_WORD (beg, len);
}
/* X is a REG or SUBREG rtx.  Return the bytes it touches as a bitmask.  */

static unsigned HOST_WIDE_INT
rtx_to_undefined (x)
     rtx x;
{
  unsigned int len, beg;
  unsigned HOST_WIDE_INT ret;
  len = GET_MODE_SIZE (GET_MODE (x));
  beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
  ret = ~ ((unsigned HOST_WIDE_INT) 0);
  ret = (~(ret << len)) << beg;
  return ret;
}
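
/* For instance, a (subreg:SI (reg:DI x) 2) has len 4 and beg 2, so the
   computation above produces ~(~0 << 4) == 0xf shifted left by 2, i.e.
   the mask 0x3c: bytes 2 through 5 of the DImode register.  */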
/* We remember if we've analyzed an insn for being a move insn, and if yes
   between which operands.  */
struct copy_p_cache
{
  int seen;
  rtx source;
  rtx target;
};

/* On demand cache, for if insns are copy insns, and if yes, what
   source/target they have.  */
static struct copy_p_cache * copy_cache;
/* For INSN, return nonzero, if it's a move insn, we consider to coalesce
   later, and place the operands in *SOURCE and *TARGET, if they are
   not NULL.  */

static int
copy_insn_p (insn, source, target)
     rtx insn;
     rtx *source;
     rtx *target;
{
  rtx d, s;
  unsigned int d_regno, s_regno;
  int uid = INSN_UID (insn);

  /* First look, if we already saw this insn.  */
  if (copy_cache[uid].seen)
    {
      /* And if we saw it, if it's actually a copy insn.  */
      if (copy_cache[uid].seen == 1)
	{
	  if (source)
	    *source = copy_cache[uid].source;
	  if (target)
	    *target = copy_cache[uid].target;
	  return 1;
	}
      return 0;
    }

  /* Mark it as seen, but not being a copy insn.  */
  copy_cache[uid].seen = 2;
  insn = single_set (insn);
  if (!insn)
    return 0;
  d = SET_DEST (insn);
  s = SET_SRC (insn);

  /* We recognize moves between subreg's as copy insns.  This is used to avoid
     conflicts of those subwebs.  But they are currently _not_ used for
     coalescing (the check for this is in remember_move() below).  */
  while (GET_CODE (d) == STRICT_LOW_PART)
    d = XEXP (d, 0);
  if (GET_CODE (d) != REG
      && (GET_CODE (d) != SUBREG || GET_CODE (SUBREG_REG (d)) != REG))
    return 0;
  while (GET_CODE (s) == STRICT_LOW_PART)
    s = XEXP (s, 0);
  if (GET_CODE (s) != REG
      && (GET_CODE (s) != SUBREG || GET_CODE (SUBREG_REG (s)) != REG))
    return 0;

  s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
  d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d);

  /* Copies between hardregs are useless for us, as not coalescable anyway.  */
  if ((s_regno < FIRST_PSEUDO_REGISTER
       && d_regno < FIRST_PSEUDO_REGISTER)
      || s_regno >= max_normal_pseudo
      || d_regno >= max_normal_pseudo)
    return 0;

  if (source)
    *source = s;
  if (target)
    *target = d;

  /* Still mark it as seen, but as a copy insn this time.  */
  copy_cache[uid].seen = 1;
  copy_cache[uid].source = s;
  copy_cache[uid].target = d;
  return 1;
}
/* We build webs, as we process the conflicts.  For each use we go upward
   the insn stream, noting any defs as potentially conflicting with the
   current use.  We stop at defs of the current regno.  The conflicts are
   only potential, because we may never reach a def, so this is an undefined
   use, which conflicts with nothing.  */
/* Given a web part WP, and the location of a reg part SIZE_WORD
   return the conflict bitmap for that reg part, or NULL if it doesn't
   exist yet.  */

static bitmap
find_sub_conflicts (wp, size_word)
     struct web_part *wp;
     unsigned int size_word;
{
  struct tagged_conflict *cl;
  cl = wp->sub_conflicts;
  for (; cl; cl = cl->next)
    if (cl->size_word == size_word)
      return cl->conflicts;
  return NULL;
}
/* Similar to find_sub_conflicts(), but creates that bitmap, if it
   doesn't exist.  I.e. this never returns NULL.  */

static bitmap
get_sub_conflicts (wp, size_word)
     struct web_part *wp;
     unsigned int size_word;
{
  bitmap b = find_sub_conflicts (wp, size_word);
  if (!b)
    {
      struct tagged_conflict *cl =
	(struct tagged_conflict *) ra_alloc (sizeof *cl);
      cl->conflicts = BITMAP_XMALLOC ();
      cl->size_word = size_word;
      cl->next = wp->sub_conflicts;
      wp->sub_conflicts = cl;
      b = cl->conflicts;
    }
  return b;
}
/* Helper table for undef_to_size_word() below for small values
   of UNDEFINED.  Offsets and lengths are byte based.  */
static struct undef_table_s
{
  unsigned int new_undef;
  /* size | (byte << 16)  */
  unsigned int size_word;
} const undef_table [] =
{
  { 0, BL_TO_WORD (0, 0)}, /* 0 */
  { 0, BL_TO_WORD (0, 1)},
  { 0, BL_TO_WORD (1, 1)},
  { 0, BL_TO_WORD (0, 2)},
  { 0, BL_TO_WORD (2, 1)}, /* 4 */
  { 1, BL_TO_WORD (2, 1)},
  { 2, BL_TO_WORD (2, 1)},
  { 3, BL_TO_WORD (2, 1)},
  { 0, BL_TO_WORD (3, 1)}, /* 8 */
  { 1, BL_TO_WORD (3, 1)},
  { 2, BL_TO_WORD (3, 1)},
  { 3, BL_TO_WORD (3, 1)},
  { 0, BL_TO_WORD (2, 2)}, /* 12 */
  { 1, BL_TO_WORD (2, 2)},
  { 2, BL_TO_WORD (2, 2)},
  { 0, BL_TO_WORD (0, 4)}};
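
/* Reading the table: for *UNDEFINED == 5 (binary 0101, i.e. bytes 0 and 2
   undefined) the entry is { 1, BL_TO_WORD (2, 1)}, so undef_to_size_word()
   below returns the one byte part at offset 2 and leaves *UNDEFINED as 1
   (only byte 0 still undefined).  A following call then consumes the
   remaining byte via entry 1.  */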
/* Interpret *UNDEFINED as bitmask where each bit corresponds to a byte.
   A set bit means an undefined byte.  Factor all undefined bytes into
   groups, and return a size/ofs pair of consecutive undefined bytes,
   but according to certain borders.  Clear out those bits corresponding
   to bytes overlaid by that size/ofs pair.  REG is only used for
   the mode, to detect if it's a floating mode or not.

   For example: *UNDEFINED	size+ofs	new *UNDEFINED
		 1100		2+2		0
		 1101		2+2		1
		 0001		1+0		0  */

static unsigned int
undef_to_size_word (reg, undefined)
     rtx reg;
     unsigned HOST_WIDE_INT *undefined;
{
  /* When only the lower four bits are possibly set, we use
     a fast lookup table.  */
  if (*undefined <= 15)
    {
      struct undef_table_s u;
      u = undef_table[*undefined];
      *undefined = u.new_undef;
      return u.size_word;
    }

  /* Otherwise we handle certain cases directly.  */
  if (*undefined <= 0xffff)
    switch ((int) *undefined)
      {
      case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4);
      case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8);
      case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4);
      case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4);
      case 0x0fff :
	if (INTEGRAL_MODE_P (GET_MODE (reg)))
	  { *undefined = 0xff; return BL_TO_WORD (8, 4); }
	else
	  { *undefined = 0; return BL_TO_WORD (0, 12); /* XFmode */ }
      case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4);
      case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8);
      case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8);
      case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16);
      }

  /* And if nothing matched fall back to the general solution.  For
     now unknown undefined bytes are converted to sequences of maximal
     length 4 bytes.  We could make this larger if necessary.  */
  {
    unsigned HOST_WIDE_INT u = *undefined;
    int word;
    struct undef_table_s tab;
    for (word = 0; (u & 15) == 0; word += 4)
      u >>= 4;
    u = u & 15;
    tab = undef_table[u];
    u = tab.new_undef;
    u = (*undefined & ~((unsigned HOST_WIDE_INT) 15 << word)) | (u << word);
    *undefined = u;
    /* Size remains the same, only the begin is moved up WORD bytes.  */
    return tab.size_word + BL_TO_WORD (word, 0);
  }
}
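
/* Walking through the fallback: for *UNDEFINED == 0x50 (bytes 4 and 6
   undefined) the loop finds word == 4 and u == 5; the table entry for 5
   is { 1, BL_TO_WORD (2, 1)}, so the function returns BL_TO_WORD (6, 1)
   (one byte at offset 6) and leaves *UNDEFINED as 0x10 (byte 4, to be
   consumed by the next call).  */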
/* Put the above three functions together.  For a set of undefined bytes
   as bitmap *UNDEFINED, look for (create if necessary) and return the
   corresponding conflict bitmap.  Change *UNDEFINED to remove the bytes
   covered by the part for that bitmap.  */

static bitmap
undef_to_bitmap (wp, undefined)
     struct web_part *wp;
     unsigned HOST_WIDE_INT *undefined;
{
  unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
					       undefined);
  return get_sub_conflicts (wp, size_word);
}
/* Returns the root of the web part P is a member of.  Additionally
   it compresses the path.  P may not be NULL.  */

static struct web_part *
find_web_part_1 (p)
     struct web_part *p;
{
  struct web_part *r = p;
  struct web_part *p_next;
  while (r->uplink)
    r = r->uplink;
  for (; p != r; p = p_next)
    {
      p_next = p->uplink;
      p->uplink = r;
    }
  return r;
}

/* Fast macro for the common case (WP either being the root itself, or
   the end of an already compressed path).  */

#define find_web_part(wp) ((! (wp)->uplink) ? (wp) \
  : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp))
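
/* This is the classic union-find idiom: after find_web_part_1() ran on a
   chain a -> b -> c (with c the root), both a->uplink and b->uplink point
   directly at c, so the macro's two-level test answers all later lookups
   without calling the function again.  */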
/* Unions together the parts that R1 resp. R2 are roots of.
   All dynamic information associated with the parts (number of spanned insns
   and so on) is also merged.
   The root of the resulting (possibly larger) web part is returned.  */

static struct web_part *
union_web_part_roots (r1, r2)
     struct web_part *r1, *r2;
{
  if (r1 != r2)
    {
      /* The new root is the smaller (pointerwise) of both.  This is crucial
	 to make the construction of webs from web parts work (so, when
	 scanning all parts, we see the roots before all its children).
	 Additionally this ensures, that if the web has a def at all, then
	 the root is a def (because all def parts are before use parts in the
	 web_parts[] array), or put another way, as soon as the root of a
	 web part is not a def, this is an uninitialized web part.  The
	 way we construct the I-graph ensures, that if a web is initialized,
	 then the first part we find (besides trivial 1 item parts) has a
	 def.  */
      if (r1 > r2)
	{
	  struct web_part *h = r1;
	  r1 = r2;
	  r2 = h;
	}
      r2->uplink = r1;
      num_webs--;

      /* Now we merge the dynamic information of R1 and R2.  */
      r1->spanned_deaths += r2->spanned_deaths;

      if (!r1->sub_conflicts)
	r1->sub_conflicts = r2->sub_conflicts;
      else if (r2->sub_conflicts)
	/* We need to merge the conflict bitmaps from R2 into R1.  */
	{
	  struct tagged_conflict *cl1, *cl2;
	  /* First those from R2, which are also contained in R1.
	     We union the bitmaps, and free those from R2, resetting them
	     to NULL.  */
	  for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next)
	    for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
	      if (cl1->size_word == cl2->size_word)
		{
		  bitmap_operation (cl1->conflicts, cl1->conflicts,
				    cl2->conflicts, BITMAP_IOR);
		  BITMAP_XFREE (cl2->conflicts);
		  cl2->conflicts = NULL;
		}
	  /* Now the conflict lists from R2 which weren't in R1.
	     We simply copy the entries from R2 into R1's list.  */
	  for (cl2 = r2->sub_conflicts; cl2;)
	    {
	      struct tagged_conflict *cl_next = cl2->next;
	      if (cl2->conflicts)
		{
		  cl2->next = r1->sub_conflicts;
		  r1->sub_conflicts = cl2;
		}
	      cl2 = cl_next;
	    }
	}
      r2->sub_conflicts = NULL;
      r1->crosses_call |= r2->crosses_call;
    }
  return r1;
}

/* Convenience macro, that is capable of unioning also non-roots.  */

#define union_web_parts(p1, p2) \
  ((p1 == p2) ? find_web_part (p1) \
      : union_web_part_roots (find_web_part (p1), find_web_part (p2)))
/* Remember that we've handled a given move, so we don't reprocess it.  */

static void
remember_move (insn)
     rtx insn;
{
  if (!TEST_BIT (move_handled, INSN_UID (insn)))
    {
      rtx s, d;
      SET_BIT (move_handled, INSN_UID (insn));
      if (copy_insn_p (insn, &s, &d))
	{
	  /* Some sanity test for the copy insn.  */
	  struct df_link *slink = DF_INSN_USES (df, insn);
	  struct df_link *link = DF_INSN_DEFS (df, insn);
	  if (!link || !link->ref || !slink || !slink->ref)
	    abort ();
	  /* The following (link->next != 0) happens when a hardreg
	     is used in wider mode (REG:DI %eax).  Then df.* creates
	     a def/use for each hardreg contained therein.  We only
	     allow hardregs here.  */
	  if (link->next
	      && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER)
	    abort ();

	  /* XXX for now we don't remember move insns involving any subregs.
	     Those would be difficult to coalesce (we would need to implement
	     handling of all the subwebs in the allocator, including that such
	     subwebs could be source and target of coalescing).  */
	  if (GET_CODE (s) == REG && GET_CODE (d) == REG)
	    {
	      struct move *m = (struct move *) ra_calloc (sizeof (struct move));
	      struct move_list *ml;
	      m->insn = insn;
	      ml = (struct move_list *) ra_alloc (sizeof (struct move_list));
	      ml->move = m;
	      ml->next = wl_moves;
	      wl_moves = ml;
	    }
	}
    }
}
/* This describes the USE currently looked at in the main-loop in
   build_web_parts_and_conflicts().  */
struct curr_use
{
  struct web_part *wp;
  /* This has a 1-bit for each byte in the USE, which is still undefined.  */
  unsigned HOST_WIDE_INT undefined;
  /* For easy access.  */
  unsigned int regno;
  rtx x;
  /* If some bits of this USE are live over an abnormal edge.  */
  unsigned int live_over_abnormal;
};
/* Returns nonzero iff rtx DEF and USE have bits in common (but see below).
   It is only called with DEF and USE being (reg:M a) or (subreg:M1 (reg:M2 a)
   x) rtx's.  Furthermore if it's a subreg rtx M1 is at least one word wide,
   and a is a multi-word pseudo.  If DEF or USE are hardregs, they are in
   word_mode, so we don't need to check for further hardregs which would result
   from wider references.  We are never called with paradoxical subregs.

   The return value is:
   0 for no common bits,
   1 if DEF and USE exactly cover the same bytes,
   2 if the DEF only covers a part of the bits of USE,
   3 if the DEF covers more than the bits of the USE, and
   4 if both are SUBREG's of different size, but have bytes in common.
   -1 is a special case, for when DEF and USE refer to the same regno, but
      have for other reasons no bits in common (can only happen with
      subregs referring to different words, or to words which already were
      defined for this USE).
   Furthermore it modifies use->undefined to clear the bits which get defined
   by DEF (only for cases with partial overlap).
   I.e. if bit 1 is set for the result != -1, the USE was completely covered,
   otherwise a test is needed to track the already defined bytes.  */
static int
defuse_overlap_p_1 (def, use)
     rtx def;
     struct curr_use *use;
{
  int mode = 0;
  if (def == use->x)
    return 1;
  if (!def)
    return 0;
  if (GET_CODE (def) == SUBREG)
    {
      if (REGNO (SUBREG_REG (def)) != use->regno)
	return 0;
      mode |= 1;
    }
  else if (REGNO (def) != use->regno)
    return 0;
  if (GET_CODE (use->x) == SUBREG)
    mode |= 2;
  switch (mode)
    {
      case 0: /* REG, REG */
	return 1;
      case 1: /* SUBREG, REG */
	{
	  unsigned HOST_WIDE_INT old_u = use->undefined;
	  use->undefined &= ~ rtx_to_undefined (def);
	  return (old_u != use->undefined) ? 2 : -1;
	}
      case 2: /* REG, SUBREG */
	return 3;
      case 3: /* SUBREG, SUBREG */
	if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x)))
	  /* If the size of both things is the same, the subreg's overlap
	     if they refer to the same word.  */
	  if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
	    return 1;
	/* Now the more difficult part: the same regno is referred, but the
	   sizes of the references or the words differ.  E.g.
	   (subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
	   overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
	   */
	{
	  unsigned HOST_WIDE_INT old_u;
	  int b1, e1, b2, e2;
	  unsigned int bl1, bl2;
	  bl1 = rtx_to_bits (def);
	  bl2 = rtx_to_bits (use->x);
	  b1 = BYTE_BEGIN (bl1);
	  b2 = BYTE_BEGIN (bl2);
	  e1 = b1 + BYTE_LENGTH (bl1) - 1;
	  e2 = b2 + BYTE_LENGTH (bl2) - 1;
	  if (b1 > e2 || b2 > e1)
	    return -1;
	  old_u = use->undefined;
	  use->undefined &= ~ rtx_to_undefined (def);
	  return (old_u != use->undefined) ? 4 : -1;
	}
      default:
	abort ();
    }
}
/* Macro for the common case of either def and use having the same rtx,
   or based on different regnos.  */
#define defuse_overlap_p(def, use) \
  ((def) == (use)->x ? 1 : \
     (REGNO (GET_CODE (def) == SUBREG \
	     ? SUBREG_REG (def) : def) != use->regno \
      ? 0 : defuse_overlap_p_1 (def, use)))
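
/* To see the cases in action: with USE being (reg:DI p) (undefined
   initially 0xff) and DEF being (subreg:SI (reg:DI p) 0), case 1 above
   clears bytes 0-3, leaves use->undefined as 0xf0 and returns 2 (partial
   cover); a second identical DEF would then clear nothing and yield -1.  */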
/* The use USE flows into INSN (backwards).  Determine INSNs effect on it,
   and return nonzero, if (parts of) that USE are also live before it.
   This also notes conflicts between the USE and all DEFS in that insn,
   and modifies the undefined bits of USE in case parts of it were set in
   this insn.  */

static int
live_out_1 (df, use, insn)
     struct df *df ATTRIBUTE_UNUSED;
     struct curr_use *use;
     rtx insn;
{
  int defined = 0;
  int uid = INSN_UID (insn);
  struct web_part *wp = use->wp;

  /* Mark, that this insn needs this webpart live.  */
  visit_trace[uid].wp = wp;
  visit_trace[uid].undefined = use->undefined;

  if (INSN_P (insn))
    {
      unsigned int source_regno = ~0;
      unsigned int regno = use->regno;
      unsigned HOST_WIDE_INT orig_undef = use->undefined;
      unsigned HOST_WIDE_INT final_undef = use->undefined;
      rtx s = NULL;
      unsigned int n, num_defs = insn_df[uid].num_defs;
      struct ref **defs = insn_df[uid].defs;

      /* We want to access the root webpart.  */
      wp = find_web_part (wp);
      if (GET_CODE (insn) == CALL_INSN)
	wp->crosses_call = 1;
      else if (copy_insn_p (insn, &s, NULL))
	source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);

      /* Look at all DEFS in this insn.  */
      for (n = 0; n < num_defs; n++)
	{
	  struct ref *ref = defs[n];
	  int lap;

	  /* Reset the undefined bits for each iteration, in case this
	     insn has more than one set, and one of them sets this regno.
	     But still the original undefined part conflicts with the other
	     sets.  */
	  use->undefined = orig_undef;
	  if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0)
	    {
	      if (lap == -1)
		/* Same regnos but non-overlapping or already defined bits,
		   so ignore this DEF, or better said make the yet undefined
		   part and this DEF conflicting.  */
		{
		  unsigned HOST_WIDE_INT undef;
		  undef = use->undefined;
		  while (undef)
		    bitmap_set_bit (undef_to_bitmap (wp, &undef),
				    DF_REF_ID (ref));
		  continue;
		}
	      if ((lap & 1) != 0)
		/* The current DEF completely covers the USE, so we can
		   stop traversing the code looking for further DEFs.  */
		defined = 1;
	      else
		/* We have a partial overlap.  */
		{
		  final_undef &= use->undefined;
		  if (final_undef == 0)
		    /* Now the USE is completely defined, which means, that
		       we can stop looking for former DEFs.  */
		    defined = 1;
		  /* If this is a partial overlap, which left some bits
		     in USE undefined, we normally would need to create
		     conflicts between that undefined part and the part of
		     this DEF which overlapped with some of the formerly
		     undefined bits.  We don't need to do this, because both
		     parts of this DEF (that which overlaps, and that which
		     doesn't) are written together in this one DEF, and can
		     not be colored in a way which would conflict with
		     the USE.  This is only true for partial overlap,
		     because only then the DEF and USE have bits in common,
		     which makes the DEF move, if the USE moves, making them
		     aligned.
		     If they have no bits in common (lap == -1), they are
		     really independent.  Therefore we there made a
		     conflict above.  */
		}
	      /* This is at least a partial overlap, so we need to union
		 the web parts.  */
	      wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
	    }
	  else
	    {
	      /* The DEF and the USE don't overlap at all, different
		 regnos.  I.e. make conflicts between the undefined bits,
		 and that DEF.  */
	      unsigned HOST_WIDE_INT undef = use->undefined;

	      if (regno == source_regno)
		/* This triggers only, when this was a copy insn and the
		   source is at least a part of the USE currently looked at.
		   In this case only the bits of the USE conflict with the
		   DEF, which are not covered by the source of this copy
		   insn, and which are still undefined.  I.e. in the best
		   case (the whole reg being the source), _no_ conflicts
		   between that USE and this DEF (the target of the move)
		   are created by this insn (though they might be by
		   others).  This is a super case of the normal copy insn
		   only between full regs.  */
		{
		  undef &= ~ rtx_to_undefined (s);
		}
	      if (undef)
		{
		  /*struct web_part *cwp;
		    cwp = find_web_part (&web_parts[DF_REF_ID
		    (ref)]);*/

		  /* TODO: somehow instead of noting the ID of the LINK
		     use an ID nearer to the root webpart of that LINK.
		     We can't use the root itself, because we later use the
		     ID to look at the form (reg or subreg, and if yes,
		     which subreg) of this conflict.  This means, that we
		     need to remember in the root an ID for each form, and
		     maintaining this, when merging web parts.  This makes
		     the bitmaps smaller.  */
		  do
		    bitmap_set_bit (undef_to_bitmap (wp, &undef),
				    DF_REF_ID (ref));
		  while (undef);
		}
	    }
	}
      if (defined)
	use->undefined = 0;
      else
	{
	  /* If this insn doesn't completely define the USE, increment also
	     its spanned deaths count (if this insn contains a death).  */
	  if (uid >= death_insns_max_uid)
	    abort ();
	  if (TEST_BIT (insns_with_deaths, uid))
	    wp->spanned_deaths++;
	  use->undefined = final_undef;
	}
    }

  return !defined;
}
/* Same as live_out_1() (actually calls it), but caches some information.
   E.g. if we reached this INSN with the current regno already, and the
   current undefined bits are a subset of those as we came here, we
   simply connect the web parts of the USE, and the one cached for this
   INSN, and additionally return zero, indicating we don't need to traverse
   this path any longer (all effects were already seen, as we first reached
   this insn).  */

static int
live_out (df, use, insn)
     struct df *df;
     struct curr_use *use;
     rtx insn;
{
  unsigned int uid = INSN_UID (insn);
  if (visit_trace[uid].wp
      && DF_REF_REGNO (visit_trace[uid].wp->ref) == use->regno
      && (use->undefined & ~visit_trace[uid].undefined) == 0)
    {
      union_web_parts (visit_trace[uid].wp, use->wp);
      /* Don't search any further, as we already were here with this regno.  */
      return 0;
    }
  else
    return live_out_1 (df, use, insn);
}
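
/* This caching is what keeps the backward traversal affordable: when two
   uses of the same regno with the same (or fewer) undefined bytes funnel
   into one insn, the second walk stops right here after merely unioning
   its web part with the cached one, instead of rescanning everything
   upstream of this insn again.  */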
/* The current USE reached a basic block head.  The edge E is one
   of the predecessor edges.  This evaluates the effect of the predecessor
   block onto the USE, and returns the next insn, which should be looked at.
   This either is the last insn of that pred. block, or the first one.
   The latter happens, when the pred. block has no possible effect on the
   USE, except for conflicts.  In that case, it's remembered, that the USE
   is live over that whole block, and it's skipped.  Otherwise we simply
   continue with the last insn of the block.

   This also determines the effects of abnormal edges, and remembers
   which uses are live at the end of that basic block.  */

static rtx
live_in_edge (df, use, e)
     struct df *df;
     struct curr_use *use;
     edge e;
{
  struct ra_bb_info *info_pred;
  rtx next_insn;
  /* Call used hard regs die over an exception edge, ergo
     they don't reach the predecessor block, so ignore such
     uses.  And also don't set the live_over_abnormal flag
     for them.  */
  if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER
      && call_used_regs[use->regno])
    return NULL_RTX;
  if (e->flags & EDGE_ABNORMAL)
    use->live_over_abnormal = 1;
  bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
  info_pred = (struct ra_bb_info *) e->src->aux;
  next_insn = e->src->end;

  /* If the last insn of the pred. block doesn't completely define the
     current use, we need to check the block.  */
  if (live_out (df, use, next_insn))
    {
      /* If the current regno isn't mentioned anywhere in the whole block,
	 and the complete use is still undefined...  */
      if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno)
	  && (rtx_to_undefined (use->x) & ~use->undefined) == 0)
	{
	  /* ...we can hop over the whole block and defer conflict
	     creation to later.  */
	  bitmap_set_bit (info_pred->live_throughout,
			  DF_REF_ID (use->wp->ref));
	  next_insn = e->src->head;
	}
      return next_insn;
    }
  return NULL_RTX;
}
/* USE flows into the end of the insns preceding INSN.  Determine
   their effects (in live_out()) and possibly loop over the preceding INSN,
   or call itself recursively on a basic block border.  When a toplevel
   call of this function returns the USE is completely analyzed.  I.e.
   its def-use chain (at least) is built, possibly connected with other
   def-use chains, and all defs during that chain are noted.  */

static void
live_in (df, use, insn)
     struct df *df;
     struct curr_use *use;
     rtx insn;
{
  unsigned int loc_vpass = visited_pass;

  /* Note, that, even _if_ we are called with use->wp a root-part, this might
     become non-root in the for() loop below (due to live_out() unioning
     it).  So beware, not to change use->wp in a way, for which only root-webs
     are allowed.  */
  while (1)
    {
      int uid = INSN_UID (insn);
      basic_block bb = BLOCK_FOR_INSN (insn);
      number_seen[uid]++;

      /* We want to be as fast as possible, so explicitly write
	 this loop.  */
      for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
	   insn = PREV_INSN (insn))
	;
      if (!insn)
	return;
      if (bb != BLOCK_FOR_INSN (insn))
	{
	  edge e;
	  unsigned HOST_WIDE_INT undef = use->undefined;
	  struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
	  if ((e = bb->pred) == NULL)
	    return;
	  /* We now check, if we already traversed the predecessors of this
	     block for the current pass and the current set of undefined
	     bits.  If yes, we don't need to check the predecessors again.
	     So, conceptually this information is tagged to the first
	     insn of a basic block.  */
	  if (info->pass == loc_vpass && (undef & ~info->undefined) == 0)
	    return;
	  info->pass = loc_vpass;
	  info->undefined = undef;
	  /* All but the last predecessor are handled recursively.  */
	  for (; e->pred_next; e = e->pred_next)
	    {
	      insn = live_in_edge (df, use, e);
	      if (insn)
		live_in (df, use, insn);
	      use->undefined = undef;
	    }
	  insn = live_in_edge (df, use, e);
	  if (!insn)
	    return;
	}
      else if (!live_out (df, use, insn))
	return;
    }
}
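
/* Note the hand-made tail recursion above: only the first n-1 predecessor
   edges recurse into live_in(), while the last edge just updates INSN and
   continues the while(1) loop, so a long straight-line chain of blocks
   costs no call-stack depth.  */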
/* Determine all regnos which are mentioned in a basic block, in an
   interesting way.  Interesting here means either in a def, or as the
   source of a move insn.  We only look at insns added since the last
   pass.  */

static void
update_regnos_mentioned ()
{
  int last_uid = last_max_uid;
  rtx insn;
  basic_block bb;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	/* Don't look at old insns.  */
	if (INSN_UID (insn) < last_uid)
	  {
	    /* XXX We should also remember moves over iterations (we already
	       save the cache, but not the movelist).  */
	    if (copy_insn_p (insn, NULL, NULL))
	      remember_move (insn);
	  }
	else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
	  {
	    rtx source;
	    struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
	    bitmap mentioned = info->regnos_mentioned;
	    struct df_link *link;
	    if (copy_insn_p (insn, &source, NULL))
	      {
		remember_move (insn);
		bitmap_set_bit (mentioned,
				REGNO (GET_CODE (source) == SUBREG
				       ? SUBREG_REG (source) : source));
	      }
	    for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
	      if (link->ref)
		bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref));
	  }
      }
}
/* Handle the uses which reach a block end, but were deferred due
   to their regno not being mentioned in that block.  This adds the
   remaining conflicts and updates also the crosses_call and
   spanned_deaths members.  */

static void
livethrough_conflicts_bb (bb)
     basic_block bb;
{
  struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
  rtx insn;
  bitmap all_defs;
  int first;
  unsigned int use_id;
  unsigned int deaths = 0;
  unsigned int contains_call = 0;

  /* If there are no deferred uses, just return.  */
  if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
    return;

  /* First collect the IDs of all defs, count the number of death
     containing insns, and if there's some call_insn here.  */
  all_defs = BITMAP_XMALLOC ();
  for (insn = bb->head; insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  unsigned int n;
	  struct ra_insn_info info;

	  info = insn_df[INSN_UID (insn)];
	  for (n = 0; n < info.num_defs; n++)
	    bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
	  if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
	    deaths++;
	  if (GET_CODE (insn) == CALL_INSN)
	    contains_call = 1;
	}
      if (insn == bb->end)
	break;
    }

  /* And now, if we have found anything, make all live_through
     uses conflict with all defs, and update their other members.  */
  if (deaths > 0 || bitmap_first_set_bit (all_defs) >= 0)
    EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id,
      {
	struct web_part *wp = &web_parts[df->def_id + use_id];
	unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
	bitmap conflicts;
	wp = find_web_part (wp);
	wp->spanned_deaths += deaths;
	wp->crosses_call |= contains_call;
	conflicts = get_sub_conflicts (wp, bl);
	bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
      });

  BITMAP_XFREE (all_defs);
}
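
/* This is the payoff of the live_throughout deferral: a use whose regno
   is never mentioned in a large block was hopped over in one step by
   live_in_edge(), and its conflicts with all defs inside that block are
   added here with a single bitmap IOR instead of insn by insn.  */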
/* Allocate the per basic block info for traversing the insn stream for
   building live ranges.  */

static void
init_bb_info ()
{
  basic_block bb;
  FOR_ALL_BB (bb)
    {
      struct ra_bb_info *info =
	(struct ra_bb_info *) xcalloc (1, sizeof *info);
      info->regnos_mentioned = BITMAP_XMALLOC ();
      info->live_throughout = BITMAP_XMALLOC ();
      info->old_aux = bb->aux;
      bb->aux = (void *) info;
    }
}
/* Free that per basic block info.  */

static void
free_bb_info ()
{
  basic_block bb;
  FOR_ALL_BB (bb)
    {
      struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
      BITMAP_XFREE (info->regnos_mentioned);
      BITMAP_XFREE (info->live_throughout);
      bb->aux = info->old_aux;
      free (info);
    }
}
/* Toplevel function for the first part of this file.
   Connect web parts, thereby implicitly building webs, and remember
   their conflicts.  */

static void
build_web_parts_and_conflicts (df)
     struct df *df;
{
  struct df_link *link;
  struct curr_use use;
  basic_block bb;

  number_seen = (int *) xcalloc (get_max_uid (), sizeof (int));
  visit_trace = (struct visit_trace *) xcalloc (get_max_uid (),
						sizeof (visit_trace[0]));
  update_regnos_mentioned ();

  /* Here's the main loop.
     It goes through all insn's, connects web parts along the way, notes
     conflicts between webparts, and remembers move instructions.  */
  visited_pass = 0;
  for (use.regno = 0; use.regno < (unsigned int) max_regno; use.regno++)
    if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno])
      for (link = df->regs[use.regno].uses; link; link = link->next)
	if (link->ref)
	  {
	    struct ref *ref = link->ref;
	    rtx insn = DF_REF_INSN (ref);
	    /* Only recheck marked or new uses, or uses from hardregs.  */
	    if (use.regno >= FIRST_PSEUDO_REGISTER
		&& DF_REF_ID (ref) < last_use_id
		&& !TEST_BIT (last_check_uses, DF_REF_ID (ref)))
	      continue;
	    use.wp = &web_parts[df->def_id + DF_REF_ID (ref)];
	    use.x = DF_REF_REG (ref);
	    use.live_over_abnormal = 0;
	    use.undefined = rtx_to_undefined (use.x);
	    visited_pass++;
	    live_in (df, &use, insn);
	    if (use.live_over_abnormal)
	      SET_BIT (live_over_abnormal, DF_REF_ID (ref));
	  }

  dump_number_seen ();
  FOR_ALL_BB (bb)
    {
      struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
      livethrough_conflicts_bb (bb);
      bitmap_zero (info->live_throughout);
      info->pass = 0;
    }
  free (number_seen);
  free (visit_trace);
}
/* Here we look per insn, for DF references being in uses _and_ defs.
   This means, in the RTL a (REG xx) expression was seen as a
   read/modify/write, as happens for (set (subreg:SI (reg:DI xx)) (...))
   e.g.  Our code has created two webs for this, as it should.  Unfortunately,
   as the REG reference is only one time in the RTL we can't color
   both webs different (arguably this also would be wrong for a real
   read-mod-write instruction), so we must reconnect such webs.  */

static void
connect_rmw_web_parts (df)
     struct df *df;
{
  unsigned int i;

  for (i = 0; i < df->use_id; i++)
    {
      struct web_part *wp1 = &web_parts[df->def_id + i];
      rtx reg;
      struct df_link *link;
      if (!wp1->ref)
	continue;
      /* If it's an uninitialized web, we don't want to connect it to others,
	 as the read cycle in read-mod-write had probably no effect.  */
      if (find_web_part (wp1) >= &web_parts[df->def_id])
	continue;
      reg = DF_REF_REAL_REG (wp1->ref);
      link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref));
      for (; link; link = link->next)
	if (reg == DF_REF_REAL_REG (link->ref))
	  {
	    struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)];
	    union_web_parts (wp1, wp2);
	  }
    }
}
/* Deletes all hardregs from *S which are not allowed for MODE.  */

static void
prune_hardregs_for_mode (s, mode)
     HARD_REG_SET *s;
     enum machine_mode mode;
{
  AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
}
/* Initialize the members of a web, which are deducible from REG.  */

static void
init_one_web_common (web, reg)
     struct web *web;
     rtx reg;
{
  if (GET_CODE (reg) != REG)
    abort ();
  /* web->id isn't initialized here.  */
  web->regno = REGNO (reg);
  web->orig_x = reg;
  if (!web->dlink)
    {
      web->dlink = (struct dlist *) ra_calloc (sizeof (struct dlist));
      DLIST_WEB (web->dlink) = web;
    }
  /* XXX
     the former (superunion) doesn't constrain the graph enough.  E.g.
     on x86 QImode _requires_ QI_REGS, but as alternate class usually
     GENERAL_REGS is given.  So the graph is not constrained enough,
     thinking it has more freedom than it really has, which leads
     to repeated spill attempts.  OTOH the latter (only using preferred
     class) is too constrained, as normally (e.g. with all SImode
     pseudos), they can be allocated also in the alternate class.
     What we really want, are the _exact_ hard regs allowed, not
     just a class.  Later.  */
  /*web->regclass = reg_class_superunion
		    [reg_preferred_class (web->regno)]
		    [reg_alternate_class (web->regno)];*/
  /*web->regclass = reg_preferred_class (web->regno);*/
  web->regclass = reg_class_subunion
    [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];
  web->regclass = reg_preferred_class (web->regno);
  if (web->regno < FIRST_PSEUDO_REGISTER)
    {
      web->color = web->regno;
      put_web (web, PRECOLORED);
      web->num_conflicts = UINT_MAX;
      web->add_hardregs = 0;
      CLEAR_HARD_REG_SET (web->usable_regs);
      SET_HARD_REG_BIT (web->usable_regs, web->regno);
      web->num_freedom = 1;
    }
  else
    {
      HARD_REG_SET alternate;
      web->color = -1;
      put_web (web, INITIAL);
      /* add_hardregs is wrong in multi-length classes, e.g.
	 using a DFmode pseudo on x86 can result in class FLOAT_INT_REGS,
	 where, if it finally is allocated to GENERAL_REGS it needs two,
	 if allocated to FLOAT_REGS only one hardreg.  XXX */
      web->add_hardregs =
	CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
      web->num_conflicts = 0 * web->add_hardregs;
      COPY_HARD_REG_SET (web->usable_regs,
			 reg_class_contents[reg_preferred_class
					    (web->regno)]);
      COPY_HARD_REG_SET (alternate,
			 reg_class_contents[reg_alternate_class
					    (web->regno)]);
      IOR_HARD_REG_SET (web->usable_regs, alternate);
      /*IOR_HARD_REG_SET (web->usable_regs,
			reg_class_contents[reg_alternate_class
			(web->regno)]);*/
      AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
      prune_hardregs_for_mode (&web->usable_regs,
			       PSEUDO_REGNO_MODE (web->regno));
#ifdef CLASS_CANNOT_CHANGE_MODE
      if (web->mode_changed)
	AND_COMPL_HARD_REG_SET (web->usable_regs,
				reg_class_contents[
				  (int) CLASS_CANNOT_CHANGE_MODE]);
#endif
      web->num_freedom = hard_regs_count (web->usable_regs);
      web->num_freedom -= web->add_hardregs;
      if (!web->num_freedom)
	abort ();
    }
  COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
}
/* Initializes WEB's members from REG or zeroes them.  */

static void
init_one_web (web, reg)
     struct web *web;
     rtx reg;
{
  memset (web, 0, sizeof (struct web));
  init_one_web_common (web, reg);
  web->useless_conflicts = BITMAP_XMALLOC ();
}
/* WEB is an old web, meaning it came from the last pass, and got a
   color.  We want to remember some of its info, so zero only some
   members.  */

static void
reinit_one_web (web, reg)
     struct web *web;
     rtx reg;
{
  web->old_color = web->color + 1;
  init_one_web_common (web, reg);
  web->span_deaths = 0;
  web->spill_temp = 0;
  web->orig_spill_temp = 0;
  web->use_my_regs = 0;
  web->spill_cost = 0;
  web->was_spilled = 0;
  web->is_coalesced = 0;
  web->artificial = 0;
  web->live_over_abnormal = 0;
  web->mode_changed = 0;
  web->move_related = 0;
  web->target_of_spilled_move = 0;
  web->num_aliased = 0;
  if (web->type == PRECOLORED)
    {
      web->num_defs = 0;
      web->num_uses = 0;
      web->orig_spill_cost = 0;
    }
  CLEAR_HARD_REG_SET (web->bias_colors);
  CLEAR_HARD_REG_SET (web->prefer_colors);
  web->reg_rtx = NULL;
  web->stack_slot = NULL;
  web->pattern = NULL;
  if (!web->useless_conflicts)
    abort ();
}
/* Inserts and returns a subweb corresponding to REG into WEB (which
   becomes its super web).  It must not exist already.  */

static struct web *
add_subweb (web, reg)
     struct web *web;
     rtx reg;
{
  struct web *w;
  if (GET_CODE (reg) != SUBREG)
    abort ();
  w = (struct web *) xmalloc (sizeof (struct web));
  /* Copy most content from parent-web.  */
  *w = *web;
  /* And initialize the private stuff.  */
  w->orig_x = reg;
  w->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1;
  w->num_conflicts = 0 * w->add_hardregs;
  w->num_defs = 0;
  w->num_uses = 0;
  w->dlink = NULL;
  w->parent_web = web;
  w->subreg_next = web->subreg_next;
  web->subreg_next = w;
  return w;
}
/* Similar to add_subweb(), but instead of relying on a given SUBREG,
   we have just a size and an offset of the subpart of the REG rtx.
   In difference to add_subweb() this marks the new subweb as artificial.  */

static struct web *
add_subweb_2 (web, size_word)
     struct web *web;
     unsigned int size_word;
{
  /* To get a correct mode for the to be produced subreg, we don't want to
     simply do a mode_for_size() for the mode_class of the whole web.
     Suppose we deal with a CDImode web, but search for an 8 byte part.
     Now mode_for_size() would only search in the class MODE_COMPLEX_INT
     and would find CSImode which probably is not what we want.  Instead
     we want DImode, which is in a completely other class.  For this to work
     we instead first search the already existing subwebs, and take
     _their_ modeclasses as base for a search for ourself.  */
  rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x;
  unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT;
  enum machine_mode mode;
  mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
  if (mode == BLKmode)
    mode = mode_for_size (size, MODE_INT, 0);
  if (mode == BLKmode)
    abort ();
  web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
					 BYTE_BEGIN (size_word)));
  web->artificial = 1;
  return web;
}
/* Initialize all the web parts we are going to need.  */

static void
init_web_parts (df)
     struct df *df;
{
  int regno;
  unsigned int no;
  num_webs = 0;
  for (no = 0; no < df->def_id; no++)
    {
      if (df->defs[no])
	{
	  if (no < last_def_id && web_parts[no].ref != df->defs[no])
	    abort ();
	  web_parts[no].ref = df->defs[no];
	  /* Uplink might be set from the last iteration.  */
	  if (!web_parts[no].uplink)
	    num_webs++;
	}
      else
	/* The last iteration might have left .ref set, while df_analyse()
	   removed that ref (due to a removed copy insn) from the df->defs[]
	   array.  As we don't check for that in realloc_web_parts()
	   we do that here.  */
	web_parts[no].ref = NULL;
    }
  for (no = 0; no < df->use_id; no++)
    {
      if (df->uses[no])
	{
	  if (no < last_use_id
	      && web_parts[no + df->def_id].ref != df->uses[no])
	    abort ();
	  web_parts[no + df->def_id].ref = df->uses[no];
	  if (!web_parts[no + df->def_id].uplink)
	    num_webs++;
	}
      else
	web_parts[no + df->def_id].ref = NULL;
    }

  /* We want to have only one web for each precolored register.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      struct web_part *r1 = NULL;
      struct df_link *link;
      /* Here once was a test, if there is any DEF at all, and only then to
	 merge all the parts.  This was incorrect, we really also want to have
	 only one web-part for hardregs, even if there is no explicit DEF.  */
      /* Link together all defs...  */
      for (link = df->regs[regno].defs; link; link = link->next)
	if (link->ref)
	  {
	    struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)];
	    if (!r1)
	      r1 = r2;
	    else
	      r1 = union_web_parts (r1, r2);
	  }
      /* ... and all uses.  */
      for (link = df->regs[regno].uses; link; link = link->next)
	if (link->ref)
	  {
	    struct web_part *r2 = &web_parts[df->def_id
					     + DF_REF_ID (link->ref)];
	    if (!r1)
	      r1 = r2;
	    else
	      r1 = union_web_parts (r1, r2);
	  }
    }
}
/* In case we want to remember the conflict list of a WEB, before adding
   new conflicts, we copy it here to orig_conflict_list.  */

static void
copy_conflict_list (web)
     struct web *web;
{
  struct conflict_link *cl;
  if (web->orig_conflict_list || web->have_orig_conflicts)
    abort ();
  web->have_orig_conflicts = 1;
  for (cl = web->conflict_list; cl; cl = cl->next)
    {
      struct conflict_link *ncl;
      ncl = (struct conflict_link *) ra_alloc (sizeof *ncl);
      ncl->t = cl->t;
      ncl->sub = NULL;
      ncl->next = web->orig_conflict_list;
      web->orig_conflict_list = ncl;
      if (cl->sub)
	{
	  struct sub_conflict *sl, *nsl;
	  for (sl = cl->sub; sl; sl = sl->next)
	    {
	      nsl = (struct sub_conflict *) ra_alloc (sizeof *nsl);
	      nsl->s = sl->s;
	      nsl->t = sl->t;
	      nsl->next = ncl->sub;
	      ncl->sub = nsl;
	    }
	}
    }
}
/* Possibly add an edge from web FROM to TO marking a conflict between
   those two.  This is one half of marking a complete conflict, which notes
   in FROM, that TO is a conflict.  Adding TO to FROM's conflicts might
   make other conflicts superfluous, because the current TO overlaps some web
   already being in conflict with FROM.  In this case the smaller webs are
   deleted from the conflict list.  Likewise if TO is overlapped by a web
   already in the list, it isn't added at all.  Note, that this can only
   happen, if SUBREG webs are involved.  */

static void
add_conflict_edge (from, to)
     struct web *from, *to;
{
  if (from->type != PRECOLORED)
    {
      struct web *pfrom = find_web_for_subweb (from);
      struct web *pto = find_web_for_subweb (to);
      struct sub_conflict *sl;
      struct conflict_link *cl = pfrom->conflict_list;
      int may_delete = 1;

      /* This can happen when subwebs of one web conflict with each
	 other.  In live_out_1() we created such conflicts between yet
	 undefined webparts and defs of parts which didn't overlap with the
	 undefined bits.  Then later they nevertheless could have merged into
	 one web, and then we land here.  */
      if (pfrom == pto)
	return;
      if (remember_conflicts && !pfrom->have_orig_conflicts)
	copy_conflict_list (pfrom);
      if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
	{
	  cl = (struct conflict_link *) ra_alloc (sizeof (*cl));
	  cl->t = pto;
	  cl->sub = NULL;
	  cl->next = pfrom->conflict_list;
	  pfrom->conflict_list = cl;
	  if (pto->type != SELECT && pto->type != COALESCED)
	    pfrom->num_conflicts += 1 + pto->add_hardregs;
	  SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id));
	  may_delete = 0;
	}
      else
	/* We don't need to test for cl==NULL, because at this point
	   a cl with cl->t==pto is guaranteed to exist.  */
	while (cl->t != pto)
	  cl = cl->next;
      if (pfrom != from || pto != to)
	{
	  /* This is a subconflict which should be added.
	     If we inserted cl in this invocation, we really need to add this
	     subconflict.  If we did _not_ add it here, we only add the
	     subconflict, if cl already had subconflicts, because otherwise
	     this indicated, that the whole webs already conflict, which
	     means we are not interested in this subconflict.  */
	  if (!may_delete || cl->sub != NULL)
	    {
	      sl = (struct sub_conflict *) ra_alloc (sizeof (*sl));
	      sl->s = from;
	      sl->t = to;
	      sl->next = cl->sub;
	      cl->sub = sl;
	    }
	}
      else
	/* pfrom == from && pto == to means, that we are not interested
	   anymore in the subconflict list for this pair, because anyway
	   the whole webs conflict.  */
	cl->sub = NULL;
    }
}
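
/* Note the different encodings: sup_igraph is a full num_webs x num_webs
   bit matrix (indexed pfrom->id * num_webs + pto->id, i.e. directional),
   which makes the "is pto already in pfrom's conflict list" test above
   O(1), while igraph in record_conflict() below records each unordered
   pair just once via igraph_index().  */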
/* Record a conflict between two webs, if we haven't recorded it
   already.  */

void
record_conflict (web1, web2)
     struct web *web1, *web2;
{
  unsigned int id1 = web1->id, id2 = web2->id;
  unsigned int index = igraph_index (id1, id2);
  /* Trivial non-conflict or already recorded conflict.  */
  if (web1 == web2 || TEST_BIT (igraph, index))
    return;
  /* As fixed_regs are no targets for allocation, conflicts with them
     are pointless.  */
  if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
      || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno]))
    return;
  /* Conflicts with hardregs, which are not even a candidate
     for this pseudo are also pointless.  */
  if ((web1->type == PRECOLORED
       && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
      || (web2->type == PRECOLORED
	  && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
    return;
  /* Similar if the sets of possible hardregs don't intersect.  In this
     iteration those conflicts are useless (and would make num_conflicts
     wrong, because num_freedom is calculated from the set of possible
     hardregs).
     But in presence of spilling and incremental building of the graph we
     need to note all uses of webs conflicting with the spilled ones.
     Because the set of possible hardregs can change in the next round for
     spilled webs, we possibly have then conflicts with webs which would
     be excluded now (because then hardregs intersect).  But we actually
     need to check those uses, and to get hold of them, we need to remember
     also webs conflicting with this one, although not conflicting in this
     round because of non-intersecting hardregs.  */
  if (web1->type != PRECOLORED && web2->type != PRECOLORED
      && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs))
    {
      struct web *p1 = find_web_for_subweb (web1);
      struct web *p2 = find_web_for_subweb (web2);
      /* We expect these to be rare enough to justify bitmaps.  And because
	 we have only a special use for it, we note only the superwebs.  */
      bitmap_set_bit (p1->useless_conflicts, p2->id);
      bitmap_set_bit (p2->useless_conflicts, p1->id);
      return;
    }
  SET_BIT (igraph, index);
  add_conflict_edge (web1, web2);
  add_conflict_edge (web2, web1);
}
/* For each web W this produces the missing subwebs Wx, such that it's
   possible to exactly specify (W-Wy) for all already existing subwebs Wy.  */

static void
build_inverse_webs (web)
     struct web *web;
{
  struct web *sweb = web->subreg_next;
  unsigned HOST_WIDE_INT undef;

  undef = rtx_to_undefined (web->orig_x);
  for (; sweb; sweb = sweb->subreg_next)
    /* Only create inverses of non-artificial webs.  */
    if (!sweb->artificial)
      {
	unsigned HOST_WIDE_INT bits;
	bits = undef & ~ rtx_to_undefined (sweb->orig_x);
	while (bits)
	  {
	    unsigned int size_word = undef_to_size_word (web->orig_x, &bits);
	    if (!find_subweb_2 (web, size_word))
	      add_subweb_2 (web, size_word);
	  }
      }
}
/* Copies the content of WEB to a new one, and link it into WL.
   Used for consistency checking.  */

static void
copy_web (web, wl)
     struct web *web;
     struct web_link **wl;
{
  struct web *cweb = (struct web *) xmalloc (sizeof *cweb);
  struct web_link *link = (struct web_link *) ra_alloc (sizeof *link);

  *cweb = *web;
  link->next = *wl;
  *wl = link;
  link->web = cweb;
}
/* Given a list of webs LINK, compare the content of the webs therein
   with the global webs of the same ID.  For consistency checking.  */

static void
compare_and_free_webs (link)
     struct web_link **link;
{
  struct web_link *wl;
  for (wl = *link; wl; wl = wl->next)
    {
      struct web *web1 = wl->web;
      struct web *web2 = ID2WEB (web1->id);
      if (web1->regno != web2->regno
	  || web1->crosses_call != web2->crosses_call
	  || web1->live_over_abnormal != web2->live_over_abnormal
	  || web1->mode_changed != web2->mode_changed
	  || !rtx_equal_p (web1->orig_x, web2->orig_x)
	  || web1->type != web2->type
	  /* Only compare num_defs/num_uses with non-hardreg webs.
	     E.g. the number of uses of the framepointer changes due to
	     inserting spill code.  */
	  || (web1->type != PRECOLORED &&
	      (web1->num_uses != web2->num_uses
	       || web1->num_defs != web2->num_defs)))
	abort ();
      if (web1->type != PRECOLORED)
	{
	  unsigned int i;
	  for (i = 0; i < web1->num_defs; i++)
	    if (web1->defs[i] != web2->defs[i])
	      abort ();
	  for (i = 0; i < web1->num_uses; i++)
	    if (web1->uses[i] != web2->uses[i])
	      abort ();
	}
      if (web1->type == PRECOLORED)
	{
	  if (web1->defs)
	    free (web1->defs);
	  if (web1->uses)
	    free (web1->uses);
	}
      free (web1);
    }
  *link = NULL;
}
1759 init_webs_defs_uses ()
1762 for (d
= WEBS(INITIAL
); d
; d
= d
->next
)
1764 struct web
*web
= DLIST_WEB (d
);
1765 unsigned int def_i
, use_i
;
1766 struct df_link
*link
;
1769 if (web
->type
== PRECOLORED
)
1771 web
->num_defs
= web
->num_uses
= 0;
1775 web
->defs
= (struct ref
**) xmalloc (web
->num_defs
*
1776 sizeof (web
->defs
[0]));
1778 web
->uses
= (struct ref
**) xmalloc (web
->num_uses
*
1779 sizeof (web
->uses
[0]));
1781 for (link
= web
->temp_refs
; link
; link
= link
->next
)
1783 if (DF_REF_REG_DEF_P (link
->ref
))
1784 web
->defs
[def_i
++] = link
->ref
;
1786 web
->uses
[use_i
++] = link
->ref
;
1788 web
->temp_refs
= NULL
;
1789 if (def_i
!= web
->num_defs
|| use_i
!= web
->num_uses
)
1794 /* Called by parts_to_webs(). This creates (or recreates) the webs (and
1795 subwebs) from web parts, gives them IDs (only to super webs), and sets
1796 up use2web and def2web arrays. */
1799 parts_to_webs_1 (df
, copy_webs
, all_refs
)
1801 struct web_link
**copy_webs
;
1802 struct df_link
*all_refs
;
1805 unsigned int webnum
;
1806 unsigned int def_id
= df
->def_id
;
1807 unsigned int use_id
= df
->use_id
;
1808 struct web_part
*wp_first_use
= &web_parts
[def_id
];
1810 /* For each root web part: create and initialize a new web,
1811 setup def2web[] and use2web[] for all defs and uses, and
1812 id2web for all new webs. */
1815 for (i
= 0; i
< def_id
+ use_id
; i
++)
1817 struct web
*subweb
, *web
= 0; /* Initialize web to silence warnings. */
1818 struct web_part
*wp
= &web_parts
[i
];
1819 struct ref
*ref
= wp
->ref
;
1820 unsigned int ref_id
;
1827 all_refs
[i
].ref
= ref
;
1828 reg
= DF_REF_REG (ref
);
1831 /* If we have a web part root, create a new web. */
1832 unsigned int newid
= ~(unsigned)0;
1833 unsigned int old_web
= 0;
1835 /* In the first pass, there are no old webs, so unconditionally
1836 allocate a new one. */
1839 web
= (struct web
*) xmalloc (sizeof (struct web
));
1840 newid
= last_num_webs
++;
1841 init_one_web (web
, GET_CODE (reg
) == SUBREG
1842 ? SUBREG_REG (reg
) : reg
);
1844 /* Otherwise, we look for an old web. */
1847 /* Remember, that use2web == def2web + def_id.
1848 Ergo is def2web[i] == use2web[i - def_id] for i >= def_id.
1849 So we only need to look into def2web[] array.
1850 Try to look at the web, which formerly belonged to this
1853 /* Or which belonged to this hardreg. */
1854 if (!web
&& DF_REF_REGNO (ref
) < FIRST_PSEUDO_REGISTER
)
1855 web
= hardreg2web
[DF_REF_REGNO (ref
)];
1858 /* If we found one, reuse it. */
1859 web
= find_web_for_subweb (web
);
1860 remove_list (web
->dlink
, &WEBS(INITIAL
));
1862 copy_web (web
, copy_webs
);
1866 /* Otherwise use a new one. First from the free list. */
1868 web
= DLIST_WEB (pop_list (&WEBS(FREE
)));
1871 /* Else allocate a new one. */
1872 web
= (struct web
*) xmalloc (sizeof (struct web
));
1873 newid
= last_num_webs
++;
1876 /* The id is zeroed in init_one_web(). */
1877 if (newid
== ~(unsigned)0)
1880 reinit_one_web (web
, GET_CODE (reg
) == SUBREG
1881 ? SUBREG_REG (reg
) : reg
);
1883 init_one_web (web
, GET_CODE (reg
) == SUBREG
1884 ? SUBREG_REG (reg
) : reg
);
1885 web
->old_web
= (old_web
&& web
->type
!= PRECOLORED
) ? 1 : 0;
1887 web
->span_deaths
= wp
->spanned_deaths
;
1888 web
->crosses_call
= wp
->crosses_call
;
1890 web
->temp_refs
= NULL
;
1892 if (web
->regno
< FIRST_PSEUDO_REGISTER
&& !hardreg2web
[web
->regno
])
1893 hardreg2web
[web
->regno
] = web
;
1894 else if (web
->regno
< FIRST_PSEUDO_REGISTER
1895 && hardreg2web
[web
->regno
] != web
)
1899 /* If this reference already had a web assigned, we are done.
1900 This test better is equivalent to the web being an old web.
1901 Otherwise something is screwed. (This is tested) */
1902 if (def2web
[i
] != NULL
)
1905 web
= find_web_for_subweb (web
);
1906 /* But if this ref includes a mode change, or was a use live
1907 over an abnormal call, set appropriate flags in the web. */
1908 if ((DF_REF_FLAGS (ref
) & DF_REF_MODE_CHANGE
) != 0
1909 && web
->regno
>= FIRST_PSEUDO_REGISTER
)
1910 web
->mode_changed
= 1;
1912 && TEST_BIT (live_over_abnormal
, ref_id
))
1913 web
->live_over_abnormal
= 1;
1914 /* And check, that it's not a newly allocated web. This would be
1915 an inconsistency. */
1916 if (!web
->old_web
|| web
->type
== PRECOLORED
)
1920 /* In case this was no web part root, we need to initialize WEB
1921 from the ref2web array belonging to the root. */
1924 struct web_part
*rwp
= find_web_part (wp
);
1925 unsigned int j
= DF_REF_ID (rwp
->ref
);
1926 if (rwp
< wp_first_use
)
1930 web
= find_web_for_subweb (web
);
1933 /* Remember all references for a web in a single linked list. */
1934 all_refs
[i
].next
= web
->temp_refs
;
1935 web
->temp_refs
= &all_refs
[i
];
1937 /* And the test, that if def2web[i] was NULL above, that we are _not_
1939 if (web
->old_web
&& web
->type
!= PRECOLORED
)
1942 /* Possible create a subweb, if this ref was a subreg. */
1943 if (GET_CODE (reg
) == SUBREG
)
1945 subweb
= find_subweb (web
, reg
);
1948 subweb
= add_subweb (web
, reg
);
1956 /* And look, if the ref involves an invalid mode change. */
1957 if ((DF_REF_FLAGS (ref
) & DF_REF_MODE_CHANGE
) != 0
1958 && web
->regno
>= FIRST_PSEUDO_REGISTER
)
1959 web
->mode_changed
= 1;
1961 /* Setup def2web, or use2web, and increment num_defs or num_uses. */
1964 /* Some sanity checks. */
1967 struct web
*compare
= def2web
[i
];
1968 if (i
< last_def_id
)
1970 if (web
->old_web
&& compare
!= subweb
)
1973 if (!web
->old_web
&& compare
)
1975 if (compare
&& compare
!= subweb
)
1978 def2web
[i
] = subweb
;
1985 struct web
*compare
= use2web
[ref_id
];
1986 if (ref_id
< last_use_id
)
1988 if (web
->old_web
&& compare
!= subweb
)
1991 if (!web
->old_web
&& compare
)
1993 if (compare
&& compare
!= subweb
)
1996 use2web
[ref_id
] = subweb
;
1998 if (TEST_BIT (live_over_abnormal
, ref_id
))
1999 web
->live_over_abnormal
= 1;
2003 /* We better now have exactly as many webs as we had web part roots. */
2004 if (webnum
!= num_webs
)
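/* Non-root web parts above inherit their web from their root via
   find_web_part().  A minimal, self-contained analog of that uplink
   chase, including the path compression that find_web_part_1() applies
   to web parts (hypothetical names, kept out of the build):  */
#if 0
struct part { struct part *uplink; };

static struct part *
find_root (struct part *p)
{
  struct part *root = p, *next;
  while (root->uplink)
    root = root->uplink;
  /* Path compression: point every node on the chain at the root, so
     later lookups are O(1).  */
  for (; p != root; p = next)
    {
      next = p->uplink;
      p->uplink = root;
    }
  return root;
}
#endif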
/* This builds full webs out of web parts, without relating them to each
   other (i.e. without creating the conflict edges).  */
static void
parts_to_webs (df)
     struct df *df;
{
  unsigned int i;
  unsigned int webnum;
  struct web_link *copy_webs = NULL;
  struct dlist *d;
  struct df_link *all_refs;

  /* First build webs and ordinary subwebs.  */
  all_refs = (struct df_link *) xcalloc (df->def_id + df->use_id,
                                         sizeof (all_refs[0]));
  webnum = parts_to_webs_1 (df, &copy_webs, all_refs);

  /* Setup the webs for hardregs which are still missing (weren't
     mentioned in the code).  */
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (!hardreg2web[i])
      {
        struct web *web = (struct web *) xmalloc (sizeof (struct web));
        init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
        web->id = last_num_webs++;
        hardreg2web[web->regno] = web;
      }
  num_webs = last_num_webs;

  /* Now create all artificial subwebs, i.e. those which do
     not correspond to a real subreg in the current function's RTL, but
     which nevertheless are targets of conflicts.
     XXX we need to merge this loop with the one above, which means we need
     a way to later override the artificiality.  Beware: currently
     add_subweb_2() relies on the existence of normal subwebs for deducing
     a sane mode to use for the artificial subwebs.  */
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct web_part *wp = &web_parts[i];
      struct tagged_conflict *cl;
      struct web *web;
      if (wp->uplink || !wp->ref)
        {
          if (wp->sub_conflicts)
            abort ();
          continue;
        }
      web = def2web[i];
      web = find_web_for_subweb (web);
      for (cl = wp->sub_conflicts; cl; cl = cl->next)
        if (!find_subweb_2 (web, cl->size_word))
          add_subweb_2 (web, cl->size_word);
    }

  /* And now create the artificial subwebs needed for representing the
     inverse of some subwebs.  This also gives IDs to all subwebs.  */
  webnum = last_num_webs;
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      if (web->subreg_next)
        {
          struct web *sweb;
          build_inverse_webs (web);
          for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next)
            sweb->id = webnum++;
        }
    }

  /* Now that everyone has an ID, we can setup the id2web array.  */
  id2web = (struct web **) xcalloc (webnum, sizeof (id2web[0]));
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      ID2WEB (web->id) = web;
      for (web = web->subreg_next; web; web = web->subreg_next)
        ID2WEB (web->id) = web;
    }
  num_subwebs = webnum - last_num_webs;
  num_allwebs = num_webs + num_subwebs;
  num_webs += num_subwebs;

  /* Allocate and clear the conflict graph bitmaps.  */
  igraph = sbitmap_alloc (num_webs * num_webs / 2);
  sup_igraph = sbitmap_alloc (num_webs * num_webs);
  sbitmap_zero (igraph);
  sbitmap_zero (sup_igraph);

  /* Distribute the references to their webs.  */
  init_webs_defs_uses ();
  /* And do some sanity checks whether the old webs and those recreated
     from the web parts really are the same.  */
  compare_and_free_webs (&copy_webs);
  free (all_refs);
}
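/* A conflict is symmetric, so igraph above holds one bit per unordered
   pair of distinct web IDs, which is why num_webs * num_webs / 2 bits
   suffice.  igraph_index() itself is defined elsewhere (in ra.h); the
   following stand-alone sketch only shows the shape of such a triangular
   pair index (hypothetical name, kept out of the build):  */
#if 0
/* Map the unordered pair {a, b}, a != b, both < n, to a unique index
   below n*n/2.  */
static unsigned int
pair_index (unsigned int a, unsigned int b)
{
  if (a < b)
    {
      unsigned int t = a;
      a = b;
      b = t;
    }
  return a * (a - 1) / 2 + b;
}
#endif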
/* This deletes all conflicts to and from webs which need to be renewed
   in this pass of the allocator, i.e. those which were spilled in the
   last pass.  Furthermore it also rebuilds the bitmaps for the remaining
   conflicts.  */
static void
reset_conflicts ()
{
  unsigned int i;
  bitmap newwebs = BITMAP_XMALLOC ();
  for (i = 0; i < num_webs - num_subwebs; i++)
    {
      struct web *web = ID2WEB (i);
      /* Hardreg webs and non-old webs are new webs (which
         need rebuilding).  */
      if (web->type == PRECOLORED || !web->old_web)
        bitmap_set_bit (newwebs, web->id);
    }

  for (i = 0; i < num_webs - num_subwebs; i++)
    {
      struct web *web = ID2WEB (i);
      struct conflict_link *cl;
      struct conflict_link **pcl;
      pcl = &(web->conflict_list);

      /* First restore the conflict list to be like it was before
         coalescing.  */
      if (web->have_orig_conflicts)
        {
          web->conflict_list = web->orig_conflict_list;
          web->orig_conflict_list = NULL;
        }
      if (web->orig_conflict_list)
        abort ();

      /* New non-precolored webs have no conflict list.  */
      if (web->type != PRECOLORED && !web->old_web)
        {
          *pcl = NULL;
          /* Useless conflicts will be rebuilt completely.  But check
             for cleanliness, as the web might have come from the
             free list.  */
          if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
            abort ();
        }
      else
        {
          /* Useless conflicts with new webs will be rebuilt if they
             are still there.  */
          bitmap_operation (web->useless_conflicts, web->useless_conflicts,
                            newwebs, BITMAP_AND_COMPL);
          /* Go through all conflicts, and retain those to old webs.  */
          for (cl = web->conflict_list; cl; cl = cl->next)
            {
              if (cl->t->old_web || cl->t->type == PRECOLORED)
                {
                  *pcl = cl;
                  pcl = &(cl->next);

                  /* Also restore the entries in the igraph bitmaps.  */
                  web->num_conflicts += 1 + cl->t->add_hardregs;
                  SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id));
                  /* No subconflicts mean full webs conflict.  */
                  if (!cl->sub)
                    SET_BIT (igraph, igraph_index (web->id, cl->t->id));
                  else
                    /* Else only the parts in cl->sub must be in the
                       bitmap.  */
                    {
                      struct sub_conflict *sl;
                      for (sl = cl->sub; sl; sl = sl->next)
                        SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
                    }
                }
            }
          *pcl = NULL;
        }
      web->have_orig_conflicts = 0;
    }
  BITMAP_XFREE (newwebs);
}
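/* The retain loop above filters a singly-linked list in place through a
   pointer-to-pointer cursor (pcl): kept nodes are relinked, dropped ones
   are skipped, and no head special-case is needed.  A self-contained
   sketch of the idiom (hypothetical names, kept out of the build):  */
#if 0
struct node { int keep; struct node *next; };

static void
filter_in_place (struct node **head)
{
  struct node **pn = head;
  struct node *n;
  for (n = *head; n; n = n->next)
    if (n->keep)
      {
        *pn = n;            /* relink the kept node */
        pn = &n->next;      /* and advance the cursor behind it */
      }
  *pn = (struct node *) 0;  /* terminate the filtered list */
}
#endif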
/* For each web check its num_conflicts member against that
   number, as calculated from scratch from all neighbors.  */
void
check_conflict_numbers ()
{
  unsigned int i;
  for (i = 0; i < num_webs; i++)
    {
      struct web *web = ID2WEB (i);
      int new_conf = 0 * web->add_hardregs;
      struct conflict_link *cl;
      for (cl = web->conflict_list; cl; cl = cl->next)
        if (cl->t->type != SELECT && cl->t->type != COALESCED)
          new_conf += 1 + cl->t->add_hardregs;
      if (web->type != PRECOLORED && new_conf != web->num_conflicts)
        abort ();
    }
}
/* Convert the conflicts between web parts to conflicts between full webs.

   This can't be done in parts_to_webs(), because for recording conflicts
   between webs we need to know their final usable_regs set, which is used
   to discard non-conflicts (between webs having no hard reg in common).
   But this is set for spill temporaries only after the webs themselves are
   built.  Until then the usable_regs set is based on the pseudo regno used
   in this web, which may contain far fewer registers than later determined.
   This would result in us losing conflicts (due to record_conflict()
   thinking that a web can only be allocated to the current usable_regs,
   whereas later this is extended), leading to colorings where some regs
   which in reality conflict get the same color.  */
static void
conflicts_between_webs (df)
     struct df *df;
{
  unsigned int i;
#ifdef STACK_REGS
  struct dlist *d;
#endif
  bitmap ignore_defs = BITMAP_XMALLOC ();
  unsigned int have_ignored;
  unsigned int *pass_cache = (unsigned int *) xcalloc (num_webs, sizeof (int));
  unsigned int pass = 0;

  if (ra_pass > 1)
    reset_conflicts ();

  /* It is possible that in the conflict bitmaps some defs I are still
     noted which have web_parts[I].ref being NULL.  This can happen when
     from the last iteration the conflict bitmap for this part wasn't
     deleted, but a conflicting move insn was removed.  Its DEF is still in
     the conflict bitmap, but it doesn't exist anymore in df->defs.  To not
     have to check for this in the tight loop below, we instead remember
     the IDs of them in a bitmap, and loop only over IDs which are not in
     it.  */
  for (i = 0; i < df->def_id; i++)
    if (web_parts[i].ref == NULL)
      bitmap_set_bit (ignore_defs, i);
  have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0);

  /* Now record all conflicts between webs.  Note that we only check
     the conflict bitmaps of all defs.  Conflict bitmaps are only in
     webpart roots.  If they are in uses, those uses are roots, which
     means that this is an uninitialized web, whose conflicts
     don't matter.  Nevertheless for hardregs we also need to check uses.
     E.g. hardregs used for argument passing have no DEF in the RTL,
     but if they have uses, they indeed conflict with all DEFs they
     overlap.  */
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct tagged_conflict *cl = web_parts[i].sub_conflicts;
      struct web *supweb1;
      if (!cl
          || (i >= df->def_id
              && DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER))
        continue;
      supweb1 = def2web[i];
      supweb1 = find_web_for_subweb (supweb1);
      for (; cl; cl = cl->next)
        if (cl->conflicts)
          {
            int j;
            struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
            if (have_ignored)
              bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
                                BITMAP_AND_COMPL);
            /* We reduce the number of calls to record_conflict() with this
               pass thing.  record_conflict() itself also has some early-out
               optimizations, but here we can use the special properties of
               the loop (constant web1) to reduce that even more.
               We once used an sbitmap of already handled web indices,
               but sbitmaps are slow to clear and bitmaps are slow to
               set/test.  The current approach needs more memory, but
               locality is large.  */
            pass++;

            /* Note that there are only defs in the conflicts bitset.  */
            EXECUTE_IF_SET_IN_BITMAP (
              cl->conflicts, 0, j,
              {
                struct web *web2 = def2web[j];
                unsigned int id2 = web2->id;
                if (pass_cache[id2] != pass)
                  {
                    pass_cache[id2] = pass;
                    record_conflict (web1, web2);
                  }
              });
          }
    }

  free (pass_cache);
  BITMAP_XFREE (ignore_defs);

#ifdef STACK_REGS
  /* Pseudos can't go in stack regs if they are live at the beginning of
     a block that is reached by an abnormal edge.  */
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      int j;
      if (web->live_over_abnormal)
        for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
          record_conflict (web, hardreg2web[j]);
    }
#endif
}
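/* The pass/pass_cache pair above is a generation-counter set: bumping
   `pass` invalidates every cached entry at once, so nothing has to be
   cleared between iterations of the outer loop.  A self-contained sketch
   of the technique (hypothetical names, kept out of the build):  */
#if 0
#define NUM_IDS 1024

static unsigned int stamp[NUM_IDS]; /* zero-initialized */
static unsigned int generation;     /* the current "set" */

/* Nonzero the first time ID is seen in the current generation.  */
static int
first_visit (unsigned int id)
{
  if (stamp[id] == generation)
    return 0;
  stamp[id] = generation;
  return 1;
}

/* Starting a fresh set costs O(1): just `generation++;`.  (If the counter
   could wrap around, one real clearing of stamp[] would be needed.)  */
#endif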
/* Remember that a web was spilled, and change some characteristics
   accordingly.  */
static void
remember_web_was_spilled (web)
     struct web *web;
{
  int i;
  unsigned int found_size = 0;
  int adjust;
  web->spill_temp = 1;

  /* From now on don't use reg_pref/alt_class (regno) anymore for
     this web, but instead usable_regs.  We can't use spill_temp for
     this, as it might get reset later, when we are coalesced to a
     non-spill-temp.  In that case we still want to use usable_regs.  */
  web->use_my_regs = 1;

  /* We don't constrain spill temporaries in any way for now.
     It's wrong sometimes to have the same constraints or
     preferences as the original pseudo, esp. if they were very narrow.
     (E.g. there once was a reg wanting class AREG (only one register)
     without alternative class.  As long as also the spill-temps for
     this pseudo had the same constraints, it was spilled over and over.)
     Ideally we want some constraints also on spill-temps: because they are
     not only loaded/stored, but also worked with, any constraints from insn
     alternatives need applying.  Currently this is dealt with by reload, as
     many other things are, but at some time we want to integrate that
     functionality into the allocator.  */
  if (web->regno >= max_normal_pseudo)
    {
      COPY_HARD_REG_SET (web->usable_regs,
                        reg_class_contents[reg_preferred_class (web->regno)]);
      IOR_HARD_REG_SET (web->usable_regs,
                        reg_class_contents[reg_alternate_class (web->regno)]);
    }
  else
    COPY_HARD_REG_SET (web->usable_regs,
                       reg_class_contents[(int) GENERAL_REGS]);
  AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
  prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
#ifdef CLASS_CANNOT_CHANGE_MODE
  if (web->mode_changed)
    AND_COMPL_HARD_REG_SET (web->usable_regs, reg_class_contents[
                              (int) CLASS_CANNOT_CHANGE_MODE]);
#endif
  web->num_freedom = hard_regs_count (web->usable_regs);
  if (!web->num_freedom)
    abort ();
  COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
  /* Now look for a class which is a subset of our constraints, to
     setup add_hardregs, and regclass for debug output.  */
  web->regclass = NO_REGS;
  for (i = (int) ALL_REGS - 1; i > 0; i--)
    {
      unsigned int size;
      HARD_REG_SET test;
      COPY_HARD_REG_SET (test, reg_class_contents[i]);
      AND_COMPL_HARD_REG_SET (test, never_use_colors);
      GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found);
      continue;
    found:
      /* Measure the actual number of bits which really are overlapping
         the target regset, not just the reg_class_size.  */
      size = hard_regs_count (test);
      if (found_size < size)
        {
          web->regclass = (enum reg_class) i;
          found_size = size;
        }
    }

  adjust = 0 * web->add_hardregs;
  web->add_hardregs =
    CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
  web->num_freedom -= web->add_hardregs;
  if (!web->num_freedom)
    abort ();
  adjust -= 0 * web->add_hardregs;
  web->num_conflicts -= adjust;
}
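/* The class search above keeps the largest register class whose usable
   contents are a subset of the web's constraints, sized by an actual
   population count rather than the nominal class size.  The same shape
   with plain bitmasks (illustrative only, kept out of the build):  */
#if 0
static int
best_subset_class (const unsigned int *classes, int num_classes,
                   unsigned int usable)
{
  int i, best = -1;
  unsigned int best_size = 0;
  for (i = num_classes - 1; i > 0; i--)
    if ((classes[i] & ~usable) == 0)            /* subset test */
      {
        unsigned int size = 0, m;
        for (m = classes[i]; m; m &= m - 1)     /* popcount */
          size++;
        if (size > best_size)
          {
            best_size = size;
            best = i;
          }
      }
  return best;   /* -1 if no non-empty subset class exists */
}
#endif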
/* Look at each web, if it is used as spill web.  Or better said,
   if it will be spillable in this pass.  */
static void
detect_spill_temps ()
{
  struct dlist *d;
  bitmap already = BITMAP_XMALLOC ();

  /* Detect webs used for spill temporaries.  */
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);

      /* Below only the detection of spill temporaries.  We never spill
         precolored webs, so those can't be spill temporaries.  The code
         above (remember_web_was_spilled) can't currently cope with
         hardregs anyway.  */
      if (web->regno < FIRST_PSEUDO_REGISTER)
        continue;
      /* Uninitialized webs can't be spill-temporaries.  */
      if (web->num_defs == 0)
        continue;

      /* A web with only defs and no uses can't be spilled.  Nevertheless
         it must get a color, as it takes away a register from all webs
         live at these defs.  So we make it a short web.  */
      if (web->num_uses == 0)
        web->spill_temp = 3;
      /* A web which was spilled last time, but for which no insns were
         emitted (can happen with IR spilling ignoring sometimes
         all deaths).  */
      else if (web->changed)
        web->spill_temp = 1;
      /* A spill temporary has one def, one or more uses, all uses
         are in one insn, and either the def or use insn was inserted
         by the allocator.  */
      /* XXX not correct currently.  There might also be spill temps
         involving more than one def.  Usually that's an additional
         clobber in the using instruction.  We might also constrain
         ourselves to that, instead of, as currently, marking all
         webs involving any spill insns at all.  */
      else
        {
          unsigned int i;
          int spill_involved = 0;
          for (i = 0; i < web->num_uses && !spill_involved; i++)
            if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid)
              spill_involved = 1;
          for (i = 0; i < web->num_defs && !spill_involved; i++)
            if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid)
              spill_involved = 1;

          if (spill_involved/* && ra_pass > 2*/)
            {
              int num_deaths = web->span_deaths;
              /* Mark webs involving at least one spill insn as
                 spill temps.  */
              remember_web_was_spilled (web);
              /* Search for insns which define and use the web in question
                 at the same time, i.e. look for rmw insns.  If these insns
                 are also deaths of other webs they might have been counted
                 as such into web->span_deaths.  But because of the rmw
                 nature of this insn it is no point where a load/reload
                 could be placed successfully (it would still conflict with
                 the dead web), so reduce the number of spanned deaths by
                 those insns.  Note that sometimes such deaths are _not_
                 counted, so negative values can result.  */
              bitmap_zero (already);
              for (i = 0; i < web->num_defs; i++)
                {
                  rtx insn = web->defs[i]->insn;
                  if (TEST_BIT (insns_with_deaths, INSN_UID (insn))
                      && !bitmap_bit_p (already, INSN_UID (insn)))
                    {
                      unsigned int j;
                      bitmap_set_bit (already, INSN_UID (insn));
                      /* Only decrement it once for each insn.  */
                      for (j = 0; j < web->num_uses; j++)
                        if (web->uses[j]->insn == insn)
                          {
                            num_deaths--;
                            break;
                          }
                    }
                }
              /* But mark them specially if they could possibly be spilled,
                 either because they cross some deaths (without the above
                 mentioned ones) or calls.  */
              if (web->crosses_call || num_deaths > 0)
                web->spill_temp = 1 * 2;
            }
          /* A web spanning no deaths can't be spilled either.  No loads
             would be created for it, ergo no defs.  So the insns wouldn't
             change, making the graph not easier to color.  Make this also
             a short web.  Don't do this if it crosses calls, as these are
             also points of reloads.  */
          else if (web->span_deaths == 0 && !web->crosses_call)
            web->spill_temp = 3;
        }
      web->orig_spill_temp = web->spill_temp;
    }
  BITMAP_XFREE (already);
}
/* Returns nonzero if the rtx MEM refers somehow to a stack location.  */
int
memref_is_stack_slot (mem)
     rtx mem;
{
  rtx ad = XEXP (mem, 0);
  rtx x;
  if (GET_CODE (ad) != PLUS || GET_CODE (XEXP (ad, 1)) != CONST_INT)
    return 0;
  x = XEXP (ad, 0);
  if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
      || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
      || x == stack_pointer_rtx)
    return 1;
  return 0;
}
/* Returns nonzero if rtx X somewhere contains any pseudo register.  */
static int
contains_pseudo (x)
     rtx x;
{
  const char *fmt;
  int i;
  if (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);
  if (GET_CODE (x) == REG)
    {
      if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
        return 1;
      else
        return 0;
    }

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        if (contains_pseudo (XEXP (x, i)))
          return 1;
      }
    else if (fmt[i] == 'E')
      {
        int j;
        for (j = 0; j < XVECLEN (x, i); j++)
          if (contains_pseudo (XVECEXP (x, i, j)))
            return 1;
      }
  return 0;
}
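/* contains_pseudo() walks an expression generically by consulting the
   per-code format string ('e' = one sub-expression, 'E' = a vector of
   them), the standard way to traverse arbitrary RTL.  The same early-out
   recursion over a hypothetical tagged tree (illustrative only, kept out
   of the build):  */
#if 0
struct tnode { int hit; int nkids; struct tnode **kid; };

/* Nonzero if any node in the tree satisfies the predicate.  */
static int
tree_contains (const struct tnode *n)
{
  int i;
  if (n->hit)
    return 1;
  for (i = n->nkids - 1; i >= 0; i--)
    if (tree_contains (n->kid[i]))
      return 1;
  return 0;
}
#endif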
/* Returns nonzero if we are able to rematerialize something with
   value X.  If it's not a general operand, we test if we can produce
   a valid insn which sets a pseudo to that value, and that insn doesn't
   clobber anything.  */
static GTY(()) rtx remat_test_insn;
static int
want_to_remat (x)
     rtx x;
{
  int icode, num_clobbers = 0;

  /* If this is a valid operand, we are OK.  If it's VOIDmode, we aren't.  */
  if (general_operand (x, GET_MODE (x)))
    return 1;

  /* Otherwise, check if we can make a valid insn from it.  First initialize
     our test insn if we haven't already.  */
  if (remat_test_insn == 0)
    {
      remat_test_insn
        = make_insn_raw (gen_rtx_SET (VOIDmode,
                                      gen_rtx_REG (word_mode,
                                                   FIRST_PSEUDO_REGISTER * 2),
                                      const0_rtx));
      NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0;
    }

  /* Now make an insn like the one we would make when rematerializing
     the value X and see if valid.  */
  PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x));
  SET_SRC (PATTERN (remat_test_insn)) = x;
  /* XXX For now we don't allow any clobbers to be added, not just no
     hardreg clobbers.  */
  return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn,
                          &num_clobbers)) >= 0
          && (num_clobbers == 0
              /*|| ! added_clobbers_hard_reg_p (icode)*/));
}
/* Look at all webs, if they perhaps are rematerializable.
   They are, if all their defs are simple sets to the same value,
   and that value is simple enough, and want_to_remat() holds for it.  */
static void
detect_remat_webs ()
{
  struct dlist *d;
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      unsigned int i;
      rtx pat = NULL_RTX;
      /* Hardregs and useless webs aren't spilled -> no remat necessary.
         Defless webs obviously also can't be rematerialized.  */
      if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs
          || !web->num_uses)
        continue;
      for (i = 0; i < web->num_defs; i++)
        {
          rtx insn;
          rtx set = single_set (insn = DF_REF_INSN (web->defs[i]));
          rtx src;
          if (!set)
            break;
          src = SET_SRC (set);
          /* When only subregs of the web are set it isn't easily
             rematerializable.  */
          if (!rtx_equal_p (SET_DEST (set), web->orig_x))
            break;
          /* If we already have a pattern it must be equal to the current.  */
          if (pat && !rtx_equal_p (pat, src))
            break;
          /* Don't do the expensive checks multiple times.  */
          if (pat)
            continue;
          /* For now we allow only constant sources.  */
          if ((CONSTANT_P (src)
               /* If the whole thing is stable already, it is a source for
                  remat, no matter how complicated (probably all needed
                  resources for it are live everywhere, and don't take
                  additional register resources).  */
               /* XXX Currently we can't use patterns which contain
                  pseudos, _even_ if they are stable.  The code simply isn't
                  prepared for that.  All those operands can't be spilled
                  (or the dependent remat webs are not remat anymore), so
                  they would be oldwebs in the next iteration.  But
                  currently oldwebs can't have their references changed.
                  The incremental machinery barfs on that.  */
               || (!rtx_unstable_p (src) && !contains_pseudo (src))
               /* Additionally, memrefs to stack-slots are also useful when
                  we created them ourselves.  They might not have their
                  unchanging flag set, but nevertheless they are stable
                  across the livetime in question.  */
               || (GET_CODE (src) == MEM
                   && INSN_UID (insn) >= orig_max_uid
                   && memref_is_stack_slot (src)))
              /* And we must be able to construct an insn without
                 side-effects to actually load that value into a reg.  */
              && want_to_remat (src))
            pat = src;
          else
            break;
        }
      if (pat && i == web->num_defs)
        web->pattern = pat;
    }
}
/* Determine the spill costs of all webs.  */
static void
determine_web_costs ()
{
  struct dlist *d;
  for (d = WEBS(INITIAL); d; d = d->next)
    {
      unsigned int i, num_loads;
      int load_cost, store_cost;
      unsigned HOST_WIDE_INT w;
      struct web *web = DLIST_WEB (d);
      if (web->type == PRECOLORED)
        continue;
      /* Get costs for one load/store.  Note that we offset them by 1,
         because some patterns have a zero rtx_cost(), but we of course
         still need the actual load/store insns.  With zero all those
         webs would be the same, no matter how often and where
         they are used.  */
      if (web->pattern)
        {
          /* This web is rematerializable.  Beware, we set store_cost to
             zero, optimistically assuming that we indeed don't emit any
             stores in the spill-code addition.  This might be wrong if
             at the point of the load not all needed resources are
             available, in which case we emit a stack-based load, for
             which we in turn need the according stores.  */
          load_cost = 1 + rtx_cost (web->pattern, 0);
          store_cost = 0;
        }
      else
        {
          load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
                                            web->regclass, 1);
          store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
                                             web->regclass, 0);
        }
      /* We create only loads at deaths, whose number is in span_deaths.  */
      num_loads = MIN (web->span_deaths, web->num_uses);
      for (w = 0, i = 0; i < web->num_uses; i++)
        w += DF_REF_BB (web->uses[i])->frequency + 1;
      if (num_loads < web->num_uses)
        w = (w * num_loads + web->num_uses - 1) / web->num_uses;
      web->spill_cost = w * load_cost;
      if (store_cost)
        {
          for (w = 0, i = 0; i < web->num_defs; i++)
            w += DF_REF_BB (web->defs[i])->frequency + 1;
          web->spill_cost += w * store_cost;
        }
      web->orig_spill_cost = web->spill_cost;
    }
}
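/* Worked example of the weighting above (numbers invented): a web with
   num_uses == 4 in blocks with frequencies 3, 1, 1 and 0 accumulates
   w = (3+1) + (1+1) + (1+1) + (0+1) = 9.  With span_deaths == 2 we get
   num_loads = MIN (2, 4) = 2, so w is scaled, rounding up, to
   (9 * 2 + 4 - 1) / 4 = 5, and spill_cost = 5 * load_cost.  The store
   part is accumulated the same way over the defs and added on top.  */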
/* Detect webs which are set in a conditional jump insn (possibly a
   decrement-and-branch type of insn), and mark them not to be
   spillable.  The stores for them would need to be placed on edges,
   which destroys the CFG.  (Somewhen we want to deal with that XXX.)  */
static void
detect_webs_set_in_cond_jump ()
{
  basic_block bb;
  FOR_EACH_BB (bb)
    if (GET_CODE (bb->end) == JUMP_INSN)
      {
        struct df_link *link;
        for (link = DF_INSN_DEFS (df, bb->end); link; link = link->next)
          if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
            {
              struct web *web = def2web[DF_REF_ID (link->ref)];
              web->orig_spill_temp = web->spill_temp = 3;
            }
      }
}
/* Second top-level function of this file.
   Converts the connected web parts to full webs.  This means it allocates
   all webs, and initializes all fields, including detecting spill
   temporaries.  It does not distribute moves to their corresponding webs,
   though.  */
static void
make_webs (df)
     struct df *df;
{
  /* First build all the webs themselves.  They are not related to
     each other yet.  */
  parts_to_webs (df);
  /* Now detect spill temporaries to initialize their usable_regs set.  */
  detect_spill_temps ();
  detect_webs_set_in_cond_jump ();
  /* And finally relate them to each other, meaning to record all possible
     conflicts between webs (see the comment there).  */
  conflicts_between_webs (df);
  detect_remat_webs ();
  determine_web_costs ();
}
/* Distribute moves to the corresponding webs.  */
static void
moves_to_webs (df)
     struct df *df;
{
  struct df_link *link;
  struct move_list *ml;

  /* Distribute all moves to their corresponding webs, making sure
     each move is in a web maximally one time (happens on some strange
     insns).  */
  for (ml = wl_moves; ml; ml = ml->next)
    {
      struct move *m = ml->move;
      struct web *web;
      struct move_list *newml;
      if (!m)
        continue;
      /* Multiple defs/uses can happen in moves involving hard-regs in
         a wider mode.  For those df.* creates use/def references for each
         real hard-reg involved.  For coalescing we are interested in
         the smallest numbered hard-reg.  */
      for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next)
        if (link->ref)
          {
            web = def2web[DF_REF_ID (link->ref)];
            web = find_web_for_subweb (web);
            if (!m->target_web || web->regno < m->target_web->regno)
              m->target_web = web;
          }
      for (link = DF_INSN_USES (df, m->insn); link; link = link->next)
        if (link->ref)
          {
            web = use2web[DF_REF_ID (link->ref)];
            web = find_web_for_subweb (web);
            if (!m->source_web || web->regno < m->source_web->regno)
              m->source_web = web;
          }
      if (m->source_web && m->target_web
          /* If the usable_regs don't intersect we can't coalesce the two
             webs anyway, as this is no simple copy insn (it might even
             need an intermediate stack temp to execute this "copy" insn).  */
          && hard_regs_intersect_p (&m->source_web->usable_regs,
                                    &m->target_web->usable_regs))
        {
          if (!flag_ra_optimistic_coalescing)
            {
              struct move_list *test = m->source_web->moves;
              for (; test && test->move != m; test = test->next)
                ;
              if (!test)
                {
                  newml = (struct move_list *)
                    ra_alloc (sizeof (struct move_list));
                  newml->move = m;
                  newml->next = m->source_web->moves;
                  m->source_web->moves = newml;
                }
              test = m->target_web->moves;
              for (; test && test->move != m; test = test->next)
                ;
              if (!test)
                {
                  newml = (struct move_list *)
                    ra_alloc (sizeof (struct move_list));
                  newml->move = m;
                  newml->next = m->target_web->moves;
                  m->target_web->moves = newml;
                }
            }
        }
      else
        /* Delete this move.  */
        ml->move = NULL;
    }
}
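/* The loops above use a guard scan before a push-front: a move is linked
   into a web's move list only if a linear search doesn't find it there
   already.  A self-contained sketch of the idiom (hypothetical names,
   caller-provided node storage, kept out of the build):  */
#if 0
struct mlist { void *payload; struct mlist *next; };

static void
push_unique (struct mlist **head, void *payload, struct mlist *node)
{
  struct mlist *t;
  for (t = *head; t && t->payload != payload; t = t->next)
    ;
  if (!t)
    {
      node->payload = payload;
      node->next = *head;
      *head = node;
    }
}
#endif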
/* Handle tricky asm insns.
   Supposed to create conflicts to hardregs which aren't allowed in
   the constraints.  Doesn't actually do that, as it might confuse
   and constrain the allocator too much.  */
static void
handle_asm_insn (df, insn)
     struct df *df;
     rtx insn;
{
  const char *constraints[MAX_RECOG_OPERANDS];
  enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
  int i, noperands, in_output;
  HARD_REG_SET clobbered, allowed, conflict;
  rtx pat;
  if (! INSN_P (insn)
      || (noperands = asm_noperands (PATTERN (insn))) < 0)
    return;
  pat = PATTERN (insn);
  CLEAR_HARD_REG_SET (clobbered);

  if (GET_CODE (pat) == PARALLEL)
    for (i = 0; i < XVECLEN (pat, 0); i++)
      {
        rtx t = XVECEXP (pat, 0, i);
        if (GET_CODE (t) == CLOBBER && GET_CODE (XEXP (t, 0)) == REG
            && REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
          SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
      }

  decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc,
                       constraints, operand_mode);
  in_output = 1;
  for (i = 0; i < noperands; i++)
    {
      const char *p = constraints[i];
      int cls = (int) NO_REGS;
      struct df_link *link;
      rtx reg;
      struct web *web;
      int nothing_allowed = 1;
      reg = recog_data.operand[i];

      /* Look whether the constraints apply to a pseudo reg, and not to
         some other thing.  */
      while (GET_CODE (reg) == SUBREG
             || GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == SIGN_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART)
        reg = XEXP (reg, 0);
      if (GET_CODE (reg) != REG || REGNO (reg) < FIRST_PSEUDO_REGISTER)
        continue;

      /* Search the web corresponding to this operand.  We depend on
         decode_asm_operands() placing the output operands
         before the input operands.  */
      while (1)
        {
          if (in_output)
            link = df->insns[INSN_UID (insn)].defs;
          else
            link = df->insns[INSN_UID (insn)].uses;
          while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg)
            link = link->next;
          if (!link || !link->ref)
            {
              if (in_output)
                in_output = 0;
              else
                abort ();
            }
          else
            break;
        }
      if (in_output)
        web = def2web[DF_REF_ID (link->ref)];
      else
        web = use2web[DF_REF_ID (link->ref)];
      reg = DF_REF_REG (link->ref);

      /* Find the constraints, noting the allowed hardregs in allowed.  */
      CLEAR_HARD_REG_SET (allowed);
      while (1)
        {
          char c = *p;

          if (c == '\0' || c == ',' || c == '#')
            {
              /* End of one alternative - mark the regs in the current
                 class, and reset the class.  */
              p++;
              IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
              if (cls != NO_REGS)
                nothing_allowed = 0;
              cls = NO_REGS;
              if (c == '#')
                do {
                    c = *p++;
                } while (c != '\0' && c != ',');
              if (c == '\0')
                break;
              continue;
            }

          switch (c)
            {
              case '=': case '+': case '*': case '%': case '?': case '!':
              case '0': case '1': case '2': case '3': case '4': case 'm':
              case '<': case '>': case 'V': case 'o': case '&': case 'E':
              case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
              case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
              case 'P':
                break;

              case 'p':
                cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
                nothing_allowed = 0;
                break;

              case 'g':
              case 'r':
                cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
                nothing_allowed = 0;
                break;

              default:
                cls =
                  (int) reg_class_subunion[cls][(int)
                                          REG_CLASS_FROM_CONSTRAINT (c, p)];
            }
          p += CONSTRAINT_LEN (c, p);
        }

      /* Now make conflicts between this web and all hardregs which
         are not allowed by the constraints.  */
      if (nothing_allowed)
        {
          /* If we had no real constraints, nothing was explicitly
             allowed, so we allow the whole class (i.e. we make no
             additional conflicts).  */
          CLEAR_HARD_REG_SET (conflict);
        }
      else
        {
          COPY_HARD_REG_SET (conflict, usable_regs
                             [reg_preferred_class (web->regno)]);
          IOR_HARD_REG_SET (conflict, usable_regs
                            [reg_alternate_class (web->regno)]);
          AND_COMPL_HARD_REG_SET (conflict, allowed);
          /* We can't yet establish these conflicts.  Reload must go first
             (or better said, we must implement some functionality of
             reload).  E.g. if some operands must match, and they need the
             same color, we don't see yet that they do not conflict
             (because they match).  For us it looks like two normal
             references with different DEFs, so they conflict, and as they
             both need the same color, the graph becomes uncolorable.  */
#if 0
          for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
            if (TEST_HARD_REG_BIT (conflict, c))
              record_conflict (web, hardreg2web[c]);
#endif
        }
      if (rtl_dump_file)
        {
          int c;
          ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:",
                        web->id);
          for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
            if (TEST_HARD_REG_BIT (conflict, c))
              ra_debug_msg (DUMP_ASM, " %d", c);
          ra_debug_msg (DUMP_ASM, "\n");
        }
    }
}
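/* One asm constraint string above encodes several alternatives separated
   by ',', and '#' cuts off the rest of an alternative.  A stand-alone
   scanner over the same surface syntax, unioning one bitmask per letter
   (purely illustrative: the real letters are decoded with
   REG_CLASS_FROM_CONSTRAINT; names here are hypothetical):  */
#if 0
static unsigned int
allowed_mask (const char *p, unsigned int (*letter_mask) (char))
{
  unsigned int allowed = 0, cur = 0;
  for (;;)
    {
      char c = *p++;
      if (c == '\0' || c == ',' || c == '#')
        {
          /* End of one alternative: merge what it allowed.  */
          allowed |= cur;
          cur = 0;
          if (c == '#')
            while (*p && *p != ',')  /* skip rest of this alternative */
              p++;
          else if (c == '\0')
            return allowed;
        }
      else
        cur |= letter_mask (c);
    }
}
#endif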
/* The real toplevel function in this file.
   Builds (or rebuilds) the complete interference graph with webs
   and conflicts.  */
void
build_i_graph (df)
     struct df *df;
{
  rtx insn;

  init_web_parts (df);

  sbitmap_zero (move_handled);
  wl_moves = NULL;

  build_web_parts_and_conflicts (df);

  /* For read-modify-write instructions we may have created two webs.
     Reconnect them here.  (s.a.)  */
  connect_rmw_web_parts (df);

  /* The webs are conceptually complete now, but still scattered around as
     connected web parts.  Collect all information and build the webs
     including all conflicts between webs (instead of web parts).  */
  make_webs (df);
  moves_to_webs (df);

  /* Look for additional constraints given by asms.  */
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    handle_asm_insn (df, insn);
}
/* Allocates or reallocates most memory for the interference graph and
   associated structures.  If it reallocates memory (meaning, this is not
   the first pass), this also changes some structures to reflect the
   additional entries in various arrays, and the higher number of
   defs and uses.  */
void
ra_build_realloc (df)
     struct df *df;
{
  struct web_part *last_web_parts = web_parts;
  struct web **last_def2web = def2web;
  struct web **last_use2web = use2web;
  sbitmap last_live_over_abnormal = live_over_abnormal;
  unsigned int i;
  struct dlist *d;
  move_handled = sbitmap_alloc (get_max_uid ());
  web_parts = (struct web_part *) xcalloc (df->def_id + df->use_id,
                                           sizeof web_parts[0]);
  def2web = (struct web **) xcalloc (df->def_id + df->use_id,
                                     sizeof def2web[0]);
  use2web = &def2web[df->def_id];
  live_over_abnormal = sbitmap_alloc (df->use_id);
  sbitmap_zero (live_over_abnormal);

  /* First go through all old defs and uses.  */
  for (i = 0; i < last_def_id + last_use_id; i++)
    {
      /* And relocate them to the new array.  This is made ugly by the
         fact that defs and uses are placed consecutively in one array.  */
      struct web_part *dest = &web_parts[i < last_def_id
                                         ? i : (df->def_id + i - last_def_id)];
      struct web_part *up;
      *dest = last_web_parts[i];
      up = dest->uplink;
      dest->uplink = NULL;

      /* Also relocate the uplink to point into the new array.  */
      if (up && up->ref)
        {
          unsigned int id = DF_REF_ID (up->ref);
          if (up < &last_web_parts[last_def_id])
            {
              if (df->defs[id])
                dest->uplink = &web_parts[DF_REF_ID (up->ref)];
            }
          else if (df->uses[id])
            dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)];
        }
    }

  /* Also set up the def2web and use2web arrays from the last pass.
     Remember also the state of live_over_abnormal.  */
  for (i = 0; i < last_def_id; i++)
    {
      struct web *web = last_def2web[i];
      if (web)
        {
          web = find_web_for_subweb (web);
          if (web->type != FREE && web->type != PRECOLORED)
            def2web[i] = last_def2web[i];
        }
    }
  for (i = 0; i < last_use_id; i++)
    {
      struct web *web = last_use2web[i];
      if (web)
        {
          web = find_web_for_subweb (web);
          if (web->type != FREE && web->type != PRECOLORED)
            use2web[i] = last_use2web[i];
        }
      if (TEST_BIT (last_live_over_abnormal, i))
        SET_BIT (live_over_abnormal, i);
    }

  /* We don't have any subwebs for now.  Somewhen we might want to
     remember them too, instead of recreating all of them every time.
     The problem is that which subwebs we need depends also on what
     other webs and subwebs exist, and which conflicts are there.
     OTOH it should be no problem if we had some more subwebs than
     strictly needed.  */
  for (d = WEBS(FREE); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      struct web *wnext;
      for (web = web->subreg_next; web; web = wnext)
        {
          wnext = web->subreg_next;
          free (web);
        }
      DLIST_WEB (d)->subreg_next = NULL;
    }

  /* The uses we anyway are going to check are not yet live over an
     abnormal edge.  In fact, they might actually not be anymore, due to
     added loads.  */
  if (last_check_uses)
    sbitmap_difference (live_over_abnormal, live_over_abnormal,
                        last_check_uses);

  if (last_def_id || last_use_id)
    {
      sbitmap_free (last_live_over_abnormal);
      free (last_web_parts);
      free (last_def2web);
    }
  if (!last_max_uid)
    {
      /* Setup copy cache, for copy_insn_p ().  */
      copy_cache = (struct copy_p_cache *)
        xcalloc (get_max_uid (), sizeof (copy_cache[0]));
    }
  else
    {
      copy_cache = (struct copy_p_cache *)
        xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
      memset (&copy_cache[last_max_uid], 0,
              (get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
    }
}
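/* Because defs and uses share one fused array (use2web == def2web +
   def_id), relocating an old index i across passes follows:
   new_index (i) = i                              if i < last_def_id,
                 = new_def_id + (i - last_def_id) otherwise,
   which is exactly the subscript used for `dest` above.  */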
/* Free up/clear some memory only needed for one pass.  */
void
ra_build_free ()
{
  struct dlist *d;
  unsigned int i;

  /* Clear the moves associated with a web (we also need to look into
     subwebs here).  */
  for (i = 0; i < num_webs; i++)
    {
      struct web *web = ID2WEB (i);
      if (!web)
        abort ();
      if (i >= num_webs - num_subwebs
          && (web->conflict_list || web->orig_conflict_list))
        abort ();
      web->moves = NULL;
    }
  /* All webs in the free list have no defs or uses anymore.  */
  for (d = WEBS(FREE); d; d = d->next)
    {
      struct web *web = DLIST_WEB (d);
      if (web->defs)
        free (web->defs);
      web->defs = NULL;
      if (web->uses)
        free (web->uses);
      web->uses = NULL;
      /* We can't free the subwebs here, as they are referenced from
         def2web[], and possibly needed in the next ra_build_realloc().
         We free them there (or in free_all_mem()).  */
    }

  /* Free all conflict bitmaps from web parts.  Note that we clear
     _all_ these conflicts, and don't rebuild them next time for uses
     which aren't rechecked.  This means that those conflict bitmaps
     only contain the incremental information.  The cumulative one
     is still contained in the edges of the I-graph, i.e. in
     conflict_list (or orig_conflict_list) of the webs.  */
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct tagged_conflict *cl;
      for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
        {
          if (cl->conflicts)
            BITMAP_XFREE (cl->conflicts);
        }
      web_parts[i].sub_conflicts = NULL;
    }

  wl_moves = NULL;

  free (id2web);
  free (move_handled);
  sbitmap_free (sup_igraph);
  sbitmap_free (igraph);
}
/* Free all memory for the interference graph structures.  */
void
ra_build_free_all (df)
     struct df *df;
{
  unsigned int i;

  free_bb_info ();
  free (copy_cache);
  copy_cache = NULL;
  for (i = 0; i < df->def_id + df->use_id; i++)
    {
      struct tagged_conflict *cl;
      for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
        {
          if (cl->conflicts)
            BITMAP_XFREE (cl->conflicts);
        }
      web_parts[i].sub_conflicts = NULL;
    }
  sbitmap_free (live_over_abnormal);
  free (web_parts);
  web_parts = NULL;
  if (last_check_uses)
    sbitmap_free (last_check_uses);
  last_check_uses = NULL;
  free (def2web);
  use2web = NULL;
  def2web = NULL;
}
#include "gt-ra-build.h"

/*
vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
*/