[official-gcc.git] / gcc / ra-build.c
1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "insn-config.h"
28 #include "recog.h"
29 #include "reload.h"
30 #include "function.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "df.h"
35 #include "output.h"
36 #include "ggc.h"
37 #include "ra.h"
39 /* This file is part of the graph coloring register allocator.
40 It deals with building the interference graph. When rebuilding
41 the graph for a function after spilling, we rebuild only those
42 parts needed, i.e. it works incrementally.
44 The first part (the functions called from build_web_parts_and_conflicts()
45 ) constructs a web_part for each pseudo register reference in the insn
46 stream, then goes backward from each use, until it reaches defs for that
47 pseudo. While going back it remembers seen defs for other registers as
48 conflicts. By connecting the uses and defs, which reach each other, webs
49 (or live ranges) are built conceptually.
51 The second part (make_webs() and children) deals with converting that
52 structure to the nodes and edges, on which our interference graph is
53 built. For each root web part constructed above, an instance of struct
54 web is created. For all subregs of pseudos, which matter for allocation,
55 a subweb of the corresponding super web is built. Finally all the
56 conflicts noted in the first part (as bitmaps) are transformed into real
57 edges.
59 As part of that process the webs are also classified (their spill cost
60 is calculated, whether they are spillable at all, and if not, for what
61 reason, or whether they are rematerializable), and move insns, which
62 are potentially coalescable, are collected.
64 The top-level entry of this file (build_i_graph) puts it all together,
65 and leaves us with a complete interference graph, which just has to
66 be colored. */
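/* An illustrative outline of the flow just described (only a sketch;
   see build_i_graph () for the real call sequence):

     phase 1:  build_web_parts_and_conflicts ()  - web parts and conflict
                                                   bitmaps
     phase 2:  make_webs () and its children     - e.g. parts_to_webs (),
                                                   conflicts_between_webs ()
     plus:     moves_to_webs ()                  - relate the collected
                                                   (potentially coalescable)
                                                   moves to the webs

   All names are the ones declared below.  */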
69 struct curr_use;
71 static unsigned HOST_WIDE_INT rtx_to_undefined (rtx);
72 static bitmap find_sub_conflicts (struct web_part *, unsigned int);
73 static bitmap get_sub_conflicts (struct web_part *, unsigned int);
74 static unsigned int undef_to_size_word (rtx, unsigned HOST_WIDE_INT *);
75 static bitmap undef_to_bitmap (struct web_part *,
76 unsigned HOST_WIDE_INT *);
77 static struct web_part * find_web_part_1 (struct web_part *);
78 static struct web_part * union_web_part_roots
79 (struct web_part *, struct web_part *);
80 static int defuse_overlap_p_1 (rtx, struct curr_use *);
81 static int live_out_1 (struct df *, struct curr_use *, rtx);
82 static int live_out (struct df *, struct curr_use *, rtx);
83 static rtx live_in_edge ( struct df *, struct curr_use *, edge);
84 static void live_in (struct df *, struct curr_use *, rtx);
85 static int copy_insn_p (rtx, rtx *, rtx *);
86 static void remember_move (rtx);
87 static void handle_asm_insn (struct df *, rtx);
88 static void prune_hardregs_for_mode (HARD_REG_SET *, enum machine_mode);
89 static void init_one_web_common (struct web *, rtx);
90 static void init_one_web (struct web *, rtx);
91 static void reinit_one_web (struct web *, rtx);
92 static struct web * add_subweb (struct web *, rtx);
93 static struct web * add_subweb_2 (struct web *, unsigned int);
94 static void init_web_parts (struct df *);
95 static void copy_conflict_list (struct web *);
96 static void add_conflict_edge (struct web *, struct web *);
97 static void build_inverse_webs (struct web *);
98 static void copy_web (struct web *, struct web_link **);
99 static void compare_and_free_webs (struct web_link **);
100 static void init_webs_defs_uses (void);
101 static unsigned int parts_to_webs_1 (struct df *, struct web_link **,
102 struct df_link *);
103 static void parts_to_webs (struct df *);
104 static void reset_conflicts (void);
105 #if 0
106 static void check_conflict_numbers (void)
107 #endif
108 static void conflicts_between_webs (struct df *);
109 static void remember_web_was_spilled (struct web *);
110 static void detect_spill_temps (void);
111 static int contains_pseudo (rtx);
112 static int want_to_remat (rtx x);
113 static void detect_remat_webs (void);
114 static void determine_web_costs (void);
115 static void detect_webs_set_in_cond_jump (void);
116 static void make_webs (struct df *);
117 static void moves_to_webs (struct df *);
118 static void connect_rmw_web_parts (struct df *);
119 static void update_regnos_mentioned (void);
120 static void livethrough_conflicts_bb (basic_block);
121 static void init_bb_info (void);
122 static void free_bb_info (void);
123 static void build_web_parts_and_conflicts (struct df *);
126 /* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
127 edge. */
128 static sbitmap live_over_abnormal;
130 /* To cache if we already saw a certain edge while analyzing one
131 use, we use a tick count incremented per use. */
132 static unsigned int visited_pass;
134 /* A sbitmap of UIDs of move insns, which we already analyzed. */
135 static sbitmap move_handled;
137 /* One such structure is allocated per insn, and traces for the currently
138 analyzed use, which web part belongs to it, and how many bytes of
139 it were still undefined when that insn was reached. */
140 struct visit_trace
142 struct web_part *wp;
143 unsigned HOST_WIDE_INT undefined;
145 /* Indexed by UID. */
146 static struct visit_trace *visit_trace;
148 /* Per basic block we have one such structure, used to speed up
149 the backtracing of uses. */
150 struct ra_bb_info
152 /* The value of visited_pass when the first insn of this BB was reached
153 the last time. If this equals the current visited_pass, then
154 undefined is valid. Otherwise not. */
155 unsigned int pass;
156 /* The still undefined bytes at that time. The use to which this is
157 relative is the current use. */
158 unsigned HOST_WIDE_INT undefined;
159 /* Bit regno is set, if that regno is mentioned in this BB as a def, or
160 the source of a copy insn. In these cases we can not skip the whole
161 block if we reach it from the end. */
162 bitmap regnos_mentioned;
163 /* If a use reaches the end of a BB, and that BB doesn't mention its
164 regno, we skip the block, and remember the ID of that use
165 as living throughout the whole block. */
166 bitmap live_throughout;
167 /* The content of the aux field before placing a pointer to this
168 structure there. */
169 void *old_aux;
172 /* We need a fast way to describe a certain part of a register.
173 Therefore we put together the size and offset (in bytes) in one
174 integer. */
175 #define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF))
176 #define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF)
177 #define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF)
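/* A small worked example of the encoding above (illustrative only):
   BL_TO_WORD (2, 4) packs begin-byte 2 and length 4 into 0x00020004;
   BYTE_BEGIN (0x00020004) recovers 2 and BYTE_LENGTH (0x00020004)
   recovers 4.  So a 4-byte part starting at byte 2 of a register is
   described by the single integer 0x00020004.  */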
179 /* For a REG or SUBREG expression X return the size/offset pair
180 as an integer. */
182 unsigned int
183 rtx_to_bits (rtx x)
185 unsigned int len, beg;
186 len = GET_MODE_SIZE (GET_MODE (x));
187 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
188 return BL_TO_WORD (beg, len);
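/* For instance (illustrative, assuming 4-byte SImode and 8-byte DImode):
   rtx_to_bits ((reg:SI x))               == BL_TO_WORD (0, 4) == 0x00000004,
   rtx_to_bits ((subreg:SI (reg:DI x) 4)) == BL_TO_WORD (4, 4) == 0x00040004. */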
191 /* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */
193 static unsigned HOST_WIDE_INT
194 rtx_to_undefined (rtx x)
196 unsigned int len, beg;
197 unsigned HOST_WIDE_INT ret;
198 len = GET_MODE_SIZE (GET_MODE (x));
199 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
200 ret = ~ ((unsigned HOST_WIDE_INT) 0);
201 ret = (~(ret << len)) << beg;
202 return ret;
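/* Worked example (illustrative, assuming 4-byte SImode and 8-byte DImode):
   for (reg:SI x)                len = 4, beg = 0  ->  mask 0x0f (bytes 0-3);
   for (subreg:SI (reg:DI x) 4)  len = 4, beg = 4  ->  mask 0xf0 (bytes 4-7),
   since ret = (~(~0 << 4)) << 4 = 0xf << 4.  Each set bit stands for one
   byte of the register that is touched by X.  */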
205 /* We remember if we've analyzed an insn for being a move insn, and if yes
206 between which operands. */
207 struct copy_p_cache
209 int seen;
210 rtx source;
211 rtx target;
214 /* On demand cache, for if insns are copy insns, and if yes, what
215 source/target they have. */
216 static struct copy_p_cache * copy_cache;
218 int *number_seen;
220 /* For INSN, return nonzero if it's a move insn we consider for coalescing
221 later, and place the operands in *SOURCE and *TARGET, if they are
222 not NULL. */
224 static int
225 copy_insn_p (rtx insn, rtx *source, rtx *target)
227 rtx d, s;
228 unsigned int d_regno, s_regno;
229 int uid = INSN_UID (insn);
231 if (!INSN_P (insn))
232 abort ();
234 /* First look, if we already saw this insn. */
235 if (copy_cache[uid].seen)
237 /* And if we saw it, if it's actually a copy insn. */
238 if (copy_cache[uid].seen == 1)
240 if (source)
241 *source = copy_cache[uid].source;
242 if (target)
243 *target = copy_cache[uid].target;
244 return 1;
246 return 0;
249 /* Mark it as seen, but not being a copy insn. */
250 copy_cache[uid].seen = 2;
251 insn = single_set (insn);
252 if (!insn)
253 return 0;
254 d = SET_DEST (insn);
255 s = SET_SRC (insn);
257 /* We recognize moves between subregs as copy insns. This is used to avoid
258 conflicts of those subwebs. But they are currently _not_ used for
259 coalescing (the check for this is in remember_move() below). */
260 while (GET_CODE (d) == STRICT_LOW_PART)
261 d = XEXP (d, 0);
262 if (!REG_P (d)
263 && (GET_CODE (d) != SUBREG || !REG_P (SUBREG_REG (d))))
264 return 0;
265 while (GET_CODE (s) == STRICT_LOW_PART)
266 s = XEXP (s, 0);
267 if (!REG_P (s)
268 && (GET_CODE (s) != SUBREG || !REG_P (SUBREG_REG (s))))
269 return 0;
271 s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
272 d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d);
274 /* Copies between hardregs are useless for us, as they are not coalescable anyway. */
275 if ((s_regno < FIRST_PSEUDO_REGISTER
276 && d_regno < FIRST_PSEUDO_REGISTER)
277 || s_regno >= max_normal_pseudo
278 || d_regno >= max_normal_pseudo)
279 return 0;
281 if (source)
282 *source = s;
283 if (target)
284 *target = d;
286 /* Still mark it as seen, but as a copy insn this time. */
287 copy_cache[uid].seen = 1;
288 copy_cache[uid].source = s;
289 copy_cache[uid].target = d;
290 return 1;
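/* Illustrative examples for copy_insn_p() (register numbers made up,
   assuming 100 and 101 are pseudos below max_normal_pseudo):
     (set (reg:SI 101) (reg:SI 100))  -> 1, *source = (reg:SI 100),
                                            *target = (reg:SI 101)
     (set (reg:SI 1) (reg:SI 2))      -> 0, both operands are hardregs
     (set (reg:SI 101) (plus ...))    -> 0, source is neither a REG nor a
                                            SUBREG of a REG.  */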
293 /* We build webs, as we process the conflicts. For each use we go upward
294 the insn stream, noting any defs as potentially conflicting with the
295 current use. We stop at defs of the current regno. The conflicts are only
296 potential, because we may never reach a def, so this is an undefined use,
297 which conflicts with nothing. */
300 /* Given a web part WP, and the location of a reg part SIZE_WORD
301 return the conflict bitmap for that reg part, or NULL if it doesn't
302 exist yet in WP. */
304 static bitmap
305 find_sub_conflicts (struct web_part *wp, unsigned int size_word)
307 struct tagged_conflict *cl;
308 cl = wp->sub_conflicts;
309 for (; cl; cl = cl->next)
310 if (cl->size_word == size_word)
311 return cl->conflicts;
312 return NULL;
315 /* Similar to find_sub_conflicts(), but creates that bitmap, if it
316 doesn't exist. I.e. this never returns NULL. */
318 static bitmap
319 get_sub_conflicts (struct web_part *wp, unsigned int size_word)
321 bitmap b = find_sub_conflicts (wp, size_word);
322 if (!b)
324 struct tagged_conflict *cl = ra_alloc (sizeof *cl);
325 cl->conflicts = BITMAP_XMALLOC ();
326 cl->size_word = size_word;
327 cl->next = wp->sub_conflicts;
328 wp->sub_conflicts = cl;
329 b = cl->conflicts;
331 return b;
334 /* Helper table for undef_to_size_word() below for small values
335 of UNDEFINED. Offsets and lengths are byte based. */
336 static struct undef_table_s {
337 unsigned int new_undef;
338 /* size | (byte << 16) */
339 unsigned int size_word;
340 } const undef_table [] = {
341 { 0, BL_TO_WORD (0, 0)}, /* 0 */
342 { 0, BL_TO_WORD (0, 1)},
343 { 0, BL_TO_WORD (1, 1)},
344 { 0, BL_TO_WORD (0, 2)},
345 { 0, BL_TO_WORD (2, 1)}, /* 4 */
346 { 1, BL_TO_WORD (2, 1)},
347 { 2, BL_TO_WORD (2, 1)},
348 { 3, BL_TO_WORD (2, 1)},
349 { 0, BL_TO_WORD (3, 1)}, /* 8 */
350 { 1, BL_TO_WORD (3, 1)},
351 { 2, BL_TO_WORD (3, 1)},
352 { 3, BL_TO_WORD (3, 1)},
353 { 0, BL_TO_WORD (2, 2)}, /* 12 */
354 { 1, BL_TO_WORD (2, 2)},
355 { 2, BL_TO_WORD (2, 2)},
356 { 0, BL_TO_WORD (0, 4)}};
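/* Example lookups in the table above (illustrative): with *UNDEFINED == 5
   (binary 0101, i.e. bytes 0 and 2 undefined), entry 5 is
   { 1, BL_TO_WORD (2, 1)}, so undef_to_size_word() below returns the
   1-byte part at offset 2 and leaves *UNDEFINED == 1; a second call then
   hits entry 1, returns the 1-byte part at offset 0 and leaves 0.  */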
358 /* Interpret *UNDEFINED as bitmask where each bit corresponds to a byte.
359 A set bit means an undefined byte. Factor all undefined bytes into
360 groups, and return a size/ofs pair of consecutive undefined bytes,
361 but according to certain borders. Clear out those bits corresponding
362 to bytes overlaid by that size/ofs pair. REG is only used for
363 the mode, to detect if it's a floating mode or not.
365 For example: *UNDEFINED size+ofs new *UNDEFINED
366 1111 4+0 0
367 1100 2+2 0
368 1101 2+2 1
369 0001 1+0 0
370 10101 1+4 101
374 static unsigned int
375 undef_to_size_word (rtx reg, unsigned HOST_WIDE_INT *undefined)
377 /* When only the lower four bits are possibly set, we use
378 a fast lookup table. */
379 if (*undefined <= 15)
381 struct undef_table_s u;
382 u = undef_table[*undefined];
383 *undefined = u.new_undef;
384 return u.size_word;
387 /* Otherwise we handle certain cases directly. */
388 if (*undefined <= 0xffff)
389 switch ((int) *undefined)
391 case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4);
392 case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8);
393 case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4);
394 case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4);
395 case 0x0fff :
396 if (INTEGRAL_MODE_P (GET_MODE (reg)))
397 { *undefined = 0xff; return BL_TO_WORD (8, 4); }
398 else
399 { *undefined = 0; return BL_TO_WORD (0, 12); /* XFmode */ }
400 case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4);
401 case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8);
402 case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8);
403 case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16);
406 /* And if nothing matched fall back to the general solution. For
407 now unknown undefined bytes are converted to sequences of maximal
408 length 4 bytes. We could make this larger if necessary. */
410 unsigned HOST_WIDE_INT u = *undefined;
411 int word;
412 struct undef_table_s tab;
413 for (word = 0; (u & 15) == 0; word += 4)
414 u >>= 4;
415 u = u & 15;
416 tab = undef_table[u];
417 u = tab.new_undef;
418 u = (*undefined & ~((unsigned HOST_WIDE_INT)15 << word)) | (u << word);
419 *undefined = u;
420 /* Size remains the same, only the begin is moved up WORD bytes. */
421 return tab.size_word + BL_TO_WORD (word, 0);
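/* A worked trace of the fallback path above (illustrative): for
   *UNDEFINED == 0x30 (bytes 4 and 5) none of the switch cases match, so
   we shift u right by one nibble (word = 4, u = 3), look up
   undef_table[3] = { 0, BL_TO_WORD (0, 2)}, clear that nibble in
   *UNDEFINED (leaving 0) and return BL_TO_WORD (0, 2) + BL_TO_WORD (4, 0),
   i.e. the 2-byte part at offset 4.  */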
425 /* Put the above three functions together. For a set of undefined bytes
426 as bitmap *UNDEFINED, look for (create if necessary) and return the
427 corresponding conflict bitmap. Change *UNDEFINED to remove the bytes
428 covered by the part for that bitmap. */
430 static bitmap
431 undef_to_bitmap (struct web_part *wp, unsigned HOST_WIDE_INT *undefined)
433 unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
434 undefined);
435 return get_sub_conflicts (wp, size_word);
438 /* Returns the root of the web part P is a member of. Additionally
439 it compresses the path. P may not be NULL. */
441 static struct web_part *
442 find_web_part_1 (struct web_part *p)
444 struct web_part *r = p;
445 struct web_part *p_next;
446 while (r->uplink)
447 r = r->uplink;
448 for (; p != r; p = p_next)
450 p_next = p->uplink;
451 p->uplink = r;
453 return r;
456 /* Fast macro for the common case (WP either being the root itself, or
457 the end of an already compressed path). */
459 #define find_web_part(wp) ((! (wp)->uplink) ? (wp) \
460 : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp))
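/* Illustrative example of the path compression above: with web parts
   A -> B -> C (C being the root, i.e. C->uplink == NULL),
   find_web_part_1 (A) first walks up to C, then rewrites both
   A->uplink and B->uplink to point directly at C, and returns C.
   The macro handles the two trivial cases (A itself being the root,
   or A->uplink being the root) without calling the function.  */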
462 /* Unions together the parts that R1 and R2, respectively, are roots of.
463 All dynamic information associated with the parts (number of spanned insns
464 and so on) is also merged.
465 The root of the resulting (possibly larger) web part is returned. */
467 static struct web_part *
468 union_web_part_roots (struct web_part *r1, struct web_part *r2)
470 if (r1 != r2)
472 /* The new root is the smaller (pointerwise) of both. This is crucial
473 to make the construction of webs from web parts work (so, when
474 scanning all parts, we see the roots before all their children).
475 Additionally this ensures that if the web has a def at all, then
476 the root is a def (because all def parts are before use parts in the
477 web_parts[] array), or put another way, as soon as the root of a
478 web part is not a def, this is an uninitialized web part. The
479 way we construct the I-graph ensures, that if a web is initialized,
480 then the first part we find (besides trivial 1 item parts) has a
481 def. */
482 if (r1 > r2)
484 struct web_part *h = r1;
485 r1 = r2;
486 r2 = h;
488 r2->uplink = r1;
489 num_webs--;
491 /* Now we merge the dynamic information of R1 and R2. */
492 r1->spanned_deaths += r2->spanned_deaths;
494 if (!r1->sub_conflicts)
495 r1->sub_conflicts = r2->sub_conflicts;
496 else if (r2->sub_conflicts)
497 /* We need to merge the conflict bitmaps from R2 into R1. */
499 struct tagged_conflict *cl1, *cl2;
500 /* First those from R2, which are also contained in R1.
501 We union the bitmaps, and free those from R2, resetting them
502 to 0. */
503 for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next)
504 for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
505 if (cl1->size_word == cl2->size_word)
507 bitmap_operation (cl1->conflicts, cl1->conflicts,
508 cl2->conflicts, BITMAP_IOR);
509 BITMAP_XFREE (cl2->conflicts);
510 cl2->conflicts = NULL;
512 /* Now the conflict lists from R2 which weren't in R1.
513 We simply copy the entries from R2 into R1's list. */
514 for (cl2 = r2->sub_conflicts; cl2;)
516 struct tagged_conflict *cl_next = cl2->next;
517 if (cl2->conflicts)
519 cl2->next = r1->sub_conflicts;
520 r1->sub_conflicts = cl2;
522 cl2 = cl_next;
525 r2->sub_conflicts = NULL;
526 r1->crosses_call |= r2->crosses_call;
528 return r1;
531 /* Convenience macro, that is capable of unioning also non-roots. */
532 #define union_web_parts(p1, p2) \
533 ((p1 == p2) ? find_web_part (p1) \
534 : union_web_part_roots (find_web_part (p1), find_web_part (p2)))
536 /* Remember that we've handled a given move, so we don't reprocess it. */
538 static void
539 remember_move (rtx insn)
541 if (!TEST_BIT (move_handled, INSN_UID (insn)))
543 rtx s, d;
544 SET_BIT (move_handled, INSN_UID (insn));
545 if (copy_insn_p (insn, &s, &d))
547 /* Some sanity test for the copy insn. */
548 struct df_link *slink = DF_INSN_USES (df, insn);
549 struct df_link *link = DF_INSN_DEFS (df, insn);
550 if (!link || !link->ref || !slink || !slink->ref)
551 abort ();
552 /* The following (link->next != 0) happens when a hardreg
553 is used in wider mode (REG:DI %eax). Then df.* creates
554 a def/use for each hardreg contained therein. We only
555 allow hardregs here. */
556 if (link->next
557 && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER)
558 abort ();
560 else
561 abort ();
562 /* XXX for now we don't remember move insns involving any subregs.
563 Those would be difficult to coalesce (we would need to implement
564 handling of all the subwebs in the allocator, including that such
565 subwebs could be source and target of coalescing). */
566 if (REG_P (s) && REG_P (d))
568 struct move *m = ra_calloc (sizeof (struct move));
569 struct move_list *ml;
570 m->insn = insn;
571 ml = ra_alloc (sizeof (struct move_list));
572 ml->move = m;
573 ml->next = wl_moves;
574 wl_moves = ml;
579 /* This describes the USE currently looked at in the main-loop in
580 build_web_parts_and_conflicts(). */
581 struct curr_use {
582 struct web_part *wp;
583 /* This has a 1-bit for each byte in the USE, which is still undefined. */
584 unsigned HOST_WIDE_INT undefined;
585 /* For easy access. */
586 unsigned int regno;
587 rtx x;
588 /* If some bits of this USE are live over an abnormal edge. */
589 unsigned int live_over_abnormal;
592 /* Returns nonzero iff rtx DEF and USE have bits in common (but see below).
593 It is only called with DEF and USE being (reg:M a) or (subreg:M1 (reg:M2 a)
594 x) rtx's. Furthermore if it's a subreg rtx M1 is at least one word wide,
595 and a is a multi-word pseudo. If DEF or USE are hardregs, they are in
596 word_mode, so we don't need to check for further hardregs which would result
597 from wider references. We are never called with paradoxical subregs.
599 This returns:
600 0 for no common bits,
601 1 if DEF and USE exactly cover the same bytes,
602 2 if the DEF only covers a part of the bits of USE
603 3 if the DEF covers more than the bits of the USE, and
604 4 if both are SUBREG's of different size, but have bytes in common.
605 -1 is a special case, for when DEF and USE refer to the same regno, but
606 have for other reasons no bits in common (can only happen with
607 subregs referring to different words, or to words which already were
608 defined for this USE).
609 Furthermore it modifies use->undefined to clear the bits which get defined
610 by DEF (only for cases with partial overlap).
611 I.e. if bit 1 is set for the result != -1, the USE was completely covered,
612 otherwise a test is needed to track the already defined bytes. */
614 static int
615 defuse_overlap_p_1 (rtx def, struct curr_use *use)
617 int mode = 0;
618 if (def == use->x)
619 return 1;
620 if (!def)
621 return 0;
622 if (GET_CODE (def) == SUBREG)
624 if (REGNO (SUBREG_REG (def)) != use->regno)
625 return 0;
626 mode |= 1;
628 else if (REGNO (def) != use->regno)
629 return 0;
630 if (GET_CODE (use->x) == SUBREG)
631 mode |= 2;
632 switch (mode)
634 case 0: /* REG, REG */
635 return 1;
636 case 1: /* SUBREG, REG */
638 unsigned HOST_WIDE_INT old_u = use->undefined;
639 use->undefined &= ~ rtx_to_undefined (def);
640 return (old_u != use->undefined) ? 2 : -1;
642 case 2: /* REG, SUBREG */
643 return 3;
644 case 3: /* SUBREG, SUBREG */
645 if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x)))
646 /* If the size of both things is the same, the subreg's overlap
647 if they refer to the same word. */
648 if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
649 return 1;
650 /* Now the more difficult part: the same regno is referred, but the
651 sizes of the references or the words differ. E.g.
652 (subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
653 overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
656 unsigned HOST_WIDE_INT old_u;
657 int b1, e1, b2, e2;
658 unsigned int bl1, bl2;
659 bl1 = rtx_to_bits (def);
660 bl2 = rtx_to_bits (use->x);
661 b1 = BYTE_BEGIN (bl1);
662 b2 = BYTE_BEGIN (bl2);
663 e1 = b1 + BYTE_LENGTH (bl1) - 1;
664 e2 = b2 + BYTE_LENGTH (bl2) - 1;
665 if (b1 > e2 || b2 > e1)
666 return -1;
667 old_u = use->undefined;
668 use->undefined &= ~ rtx_to_undefined (def);
669 return (old_u != use->undefined) ? 4 : -1;
671 default:
672 abort ();
676 /* Macro for the common case of either def and use having the same rtx,
677 or based on different regnos. */
678 #define defuse_overlap_p(def, use) \
679 ((def) == (use)->x ? 1 : \
680 (REGNO (GET_CODE (def) == SUBREG \
681 ? SUBREG_REG (def) : def) != use->regno \
682 ? 0 : defuse_overlap_p_1 (def, use)))
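/* Illustrative examples (register numbers made up, assuming reg 100 is a
   DImode pseudo and USE->x == (reg:DI 100)):
     DEF == USE->x                      -> 1, same rtx
     DEF == (reg:SI 101)                -> 0, different regno, the helper
                                            is not called
     DEF == (subreg:SI (reg:DI 100) 0)  -> defuse_overlap_p_1() is called;
        it clears the bits for bytes 0-3 in USE->undefined and returns 2
        if some of them were still undefined, else -1.  */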
685 /* The use USE flows into INSN (backwards). Determine INSN's effect on it,
686 and return nonzero, if (parts of) that USE are also live before it.
687 This also notes conflicts between the USE and all DEFS in that insn,
688 and modifies the undefined bits of USE in case parts of it were set in
689 this insn. */
691 static int
692 live_out_1 (struct df *df ATTRIBUTE_UNUSED, struct curr_use *use, rtx insn)
694 int defined = 0;
695 int uid = INSN_UID (insn);
696 struct web_part *wp = use->wp;
698 /* Mark, that this insn needs this webpart live. */
699 visit_trace[uid].wp = wp;
700 visit_trace[uid].undefined = use->undefined;
702 if (INSN_P (insn))
704 unsigned int source_regno = ~0;
705 unsigned int regno = use->regno;
706 unsigned HOST_WIDE_INT orig_undef = use->undefined;
707 unsigned HOST_WIDE_INT final_undef = use->undefined;
708 rtx s = NULL;
709 unsigned int n, num_defs = insn_df[uid].num_defs;
710 struct ref **defs = insn_df[uid].defs;
712 /* We want to access the root webpart. */
713 wp = find_web_part (wp);
714 if (CALL_P (insn))
715 wp->crosses_call = 1;
716 else if (copy_insn_p (insn, &s, NULL))
717 source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
719 /* Look at all DEFS in this insn. */
720 for (n = 0; n < num_defs; n++)
722 struct ref *ref = defs[n];
723 int lap;
725 /* Reset the undefined bits for each iteration, in case this
726 insn has more than one set, and one of them sets this regno.
727 But still the original undefined part conflicts with the other
728 sets. */
729 use->undefined = orig_undef;
730 if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0)
732 if (lap == -1)
733 /* Same regnos but non-overlapping or already defined bits,
734 so ignore this DEF, or better said make the yet undefined
735 part and this DEF conflicting. */
737 unsigned HOST_WIDE_INT undef;
738 undef = use->undefined;
739 while (undef)
740 bitmap_set_bit (undef_to_bitmap (wp, &undef),
741 DF_REF_ID (ref));
742 continue;
744 if ((lap & 1) != 0)
745 /* The current DEF completely covers the USE, so we can
746 stop traversing the code looking for further DEFs. */
747 defined = 1;
748 else
749 /* We have a partial overlap. */
751 final_undef &= use->undefined;
752 if (final_undef == 0)
753 /* Now the USE is completely defined, which means, that
754 we can stop looking for former DEFs. */
755 defined = 1;
756 /* If this is a partial overlap, which left some bits
757 in USE undefined, we normally would need to create
758 conflicts between that undefined part and the part of
759 this DEF which overlapped with some of the formerly
760 undefined bits. We don't need to do this, because both
761 parts of this DEF (that which overlaps, and that which
762 doesn't) are written together in this one DEF, and can
763 not be colored in a way which would conflict with
764 the USE. This is only true for partial overlap,
765 because only then the DEF and USE have bits in common,
766 which makes the DEF move, if the USE moves, making them
767 aligned.
768 If they have no bits in common (lap == -1), they are
769 really independent. Therefore we made a
770 conflict above. */
772 /* This is at least a partial overlap, so we need to union
773 the web parts. */
774 wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
776 else
778 /* The DEF and the USE don't overlap at all, different
779 regnos. I.e. make conflicts between the undefined bits,
780 and that DEF. */
781 unsigned HOST_WIDE_INT undef = use->undefined;
783 if (regno == source_regno)
784 /* This triggers only, when this was a copy insn and the
785 source is at least a part of the USE currently looked at.
786 In this case only the bits of the USE conflict with the
787 DEF, which are not covered by the source of this copy
788 insn, and which are still undefined. I.e. in the best
789 case (the whole reg being the source), _no_ conflicts
790 between that USE and this DEF (the target of the move)
791 are created by this insn (though they might be by
792 others). This is a super case of the normal copy insn
793 only between full regs. */
795 undef &= ~ rtx_to_undefined (s);
797 if (undef)
799 /*struct web_part *cwp;
800 cwp = find_web_part (&web_parts[DF_REF_ID
801 (ref)]);*/
803 /* TODO: somehow instead of noting the ID of the LINK
804 use an ID nearer to the root webpart of that LINK.
805 We can't use the root itself, because we later use the
806 ID to look at the form (reg or subreg, and if yes,
807 which subreg) of this conflict. This means, that we
808 need to remember in the root an ID for each form, and
809 maintaining this, when merging web parts. This makes
810 the bitmaps smaller. */
812 bitmap_set_bit (undef_to_bitmap (wp, &undef),
813 DF_REF_ID (ref));
814 while (undef);
818 if (defined)
819 use->undefined = 0;
820 else
822 /* If this insn doesn't completely define the USE, increment also
823 its spanned deaths count (if this insn contains a death). */
824 if (uid >= death_insns_max_uid)
825 abort ();
826 if (TEST_BIT (insns_with_deaths, uid))
827 wp->spanned_deaths++;
828 use->undefined = final_undef;
832 return !defined;
835 /* Same as live_out_1() (actually calls it), but caches some information.
836 E.g. if we reached this INSN with the current regno already, and the
837 current undefined bits are a subset of those as we came here, we
838 simply connect the web parts of the USE, and the one cached for this
839 INSN, and additionally return zero, indicating we don't need to traverse
840 this path any longer (all effects were already seen when we first reached
841 this insn). */
843 static inline int
844 live_out (struct df *df, struct curr_use *use, rtx insn)
846 unsigned int uid = INSN_UID (insn);
847 if (visit_trace[uid].wp
848 && DF_REF_REGNO (visit_trace[uid].wp->ref) == use->regno
849 && (use->undefined & ~visit_trace[uid].undefined) == 0)
851 union_web_parts (visit_trace[uid].wp, use->wp);
852 /* Don't search any further, as we already were here with this regno. */
853 return 0;
855 else
856 return live_out_1 (df, use, insn);
859 /* The current USE reached a basic block head. The edge E is one
860 of the predecessors edges. This evaluates the effect of the predecessor
861 block onto the USE, and returns the next insn, which should be looked at.
862 This either is the last insn of that pred. block, or the first one.
863 The latter happens, when the pred. block has no possible effect on the
864 USE, except for conflicts. In that case, it's remembered, that the USE
865 is live over that whole block, and it's skipped. Otherwise we simply
866 continue with the last insn of the block.
868 This also determines the effects of abnormal edges, and remembers
869 which uses are live at the end of that basic block. */
871 static rtx
872 live_in_edge (struct df *df, struct curr_use *use, edge e)
874 struct ra_bb_info *info_pred;
875 rtx next_insn;
876 /* Call used hard regs die over an exception edge, ergo
877 they don't reach the predecessor block, so ignore such
878 uses. And also don't set the live_over_abnormal flag
879 for them. */
880 if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER
881 && call_used_regs[use->regno])
882 return NULL_RTX;
883 if (e->flags & EDGE_ABNORMAL)
884 use->live_over_abnormal = 1;
885 bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
886 info_pred = (struct ra_bb_info *) e->src->aux;
887 next_insn = BB_END (e->src);
889 /* If the last insn of the pred. block doesn't completely define the
890 current use, we need to check the block. */
891 if (live_out (df, use, next_insn))
893 /* If the current regno isn't mentioned anywhere in the whole block,
894 and the complete use is still undefined... */
895 if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno)
896 && (rtx_to_undefined (use->x) & ~use->undefined) == 0)
898 /* ...we can hop over the whole block and defer conflict
899 creation to later. */
900 bitmap_set_bit (info_pred->live_throughout,
901 DF_REF_ID (use->wp->ref));
902 next_insn = BB_HEAD (e->src);
904 return next_insn;
906 else
907 return NULL_RTX;
910 /* USE flows into the end of the insns preceding INSN. Determine
911 their effects (in live_out()) and possibly loop over the preceding INSN,
912 or call itself recursively on a basic block border. When a toplevel
913 call of this function returns the USE is completely analyzed. I.e.
914 its def-use chain (at least) is built, possibly connected with other
915 def-use chains, and all defs during that chain are noted. */
917 static void
918 live_in (struct df *df, struct curr_use *use, rtx insn)
920 unsigned int loc_vpass = visited_pass;
922 /* Note, that, even _if_ we are called with use->wp a root-part, this might
923 become non-root in the for() loop below (due to live_out() unioning
924 it). So beware, not to change use->wp in a way, for which only root-webs
925 are allowed. */
926 while (1)
928 int uid = INSN_UID (insn);
929 basic_block bb = BLOCK_FOR_INSN (insn);
930 number_seen[uid]++;
932 /* We want to be as fast as possible, so explicitly write
933 this loop. */
934 for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
935 insn = PREV_INSN (insn))
937 if (!insn)
938 return;
939 if (bb != BLOCK_FOR_INSN (insn))
941 edge e;
942 unsigned HOST_WIDE_INT undef = use->undefined;
943 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
944 if ((e = bb->pred) == NULL)
945 return;
946 /* We now check, if we already traversed the predecessors of this
947 block for the current pass and the current set of undefined
948 bits. If yes, we don't need to check the predecessors again.
949 So, conceptually this information is tagged to the first
950 insn of a basic block. */
951 if (info->pass == loc_vpass && (undef & ~info->undefined) == 0)
952 return;
953 info->pass = loc_vpass;
954 info->undefined = undef;
955 /* All but the last predecessor are handled recursively. */
956 for (; e->pred_next; e = e->pred_next)
958 insn = live_in_edge (df, use, e);
959 if (insn)
960 live_in (df, use, insn);
961 use->undefined = undef;
963 insn = live_in_edge (df, use, e);
964 if (!insn)
965 return;
967 else if (!live_out (df, use, insn))
968 return;
972 /* Determine all regnos which are mentioned in a basic block, in an
973 interesting way. Interesting here means either in a def, or as the
974 source of a move insn. We only look at insns added since the last
975 pass. */
977 static void
978 update_regnos_mentioned (void)
980 int last_uid = last_max_uid;
981 rtx insn;
982 basic_block bb;
983 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
984 if (INSN_P (insn))
986 /* Don't look at old insns. */
987 if (INSN_UID (insn) < last_uid)
989 /* XXX We should also remember moves over iterations (we already
990 save the cache, but not the movelist). */
991 if (copy_insn_p (insn, NULL, NULL))
992 remember_move (insn);
994 else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
996 rtx source;
997 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
998 bitmap mentioned = info->regnos_mentioned;
999 struct df_link *link;
1000 if (copy_insn_p (insn, &source, NULL))
1002 remember_move (insn);
1003 bitmap_set_bit (mentioned,
1004 REGNO (GET_CODE (source) == SUBREG
1005 ? SUBREG_REG (source) : source));
1007 for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
1008 if (link->ref)
1009 bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref));
1014 /* Handle the uses which reach a block end, but were deferred due
1015 to their regnos not being mentioned in that block. This adds the
1016 remaining conflicts and also updates the crosses_call and
1017 spanned_deaths members. */
1019 static void
1020 livethrough_conflicts_bb (basic_block bb)
1022 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1023 rtx insn;
1024 bitmap all_defs;
1025 int first, use_id;
1026 unsigned int deaths = 0;
1027 unsigned int contains_call = 0;
1029 /* If there are no deferred uses, just return. */
1030 if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
1031 return;
1033 /* First collect the IDs of all defs, count the number of death
1034 containing insns, and if there's some call_insn here. */
1035 all_defs = BITMAP_XMALLOC ();
1036 for (insn = BB_HEAD (bb); insn; insn = NEXT_INSN (insn))
1038 if (INSN_P (insn))
1040 unsigned int n;
1041 struct ra_insn_info info;
1043 info = insn_df[INSN_UID (insn)];
1044 for (n = 0; n < info.num_defs; n++)
1045 bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
1046 if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
1047 deaths++;
1048 if (CALL_P (insn))
1049 contains_call = 1;
1051 if (insn == BB_END (bb))
1052 break;
1055 /* And now, if we have found anything, make all live_through
1056 uses conflict with all defs, and update their other members. */
1057 if (deaths > 0
1058 || contains_call
1059 || bitmap_first_set_bit (all_defs) >= 0)
1060 EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id,
1062 struct web_part *wp = &web_parts[df->def_id + use_id];
1063 unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
1064 bitmap conflicts;
1065 wp = find_web_part (wp);
1066 wp->spanned_deaths += deaths;
1067 wp->crosses_call |= contains_call;
1068 conflicts = get_sub_conflicts (wp, bl);
1069 bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
1072 BITMAP_XFREE (all_defs);
1075 /* Allocate the per basic block info for traversing the insn stream for
1076 building live ranges. */
1078 static void
1079 init_bb_info (void)
1081 basic_block bb;
1082 FOR_ALL_BB (bb)
1084 struct ra_bb_info *info = xcalloc (1, sizeof *info);
1085 info->regnos_mentioned = BITMAP_XMALLOC ();
1086 info->live_throughout = BITMAP_XMALLOC ();
1087 info->old_aux = bb->aux;
1088 bb->aux = (void *) info;
1092 /* Free that per basic block info. */
1094 static void
1095 free_bb_info (void)
1097 basic_block bb;
1098 FOR_ALL_BB (bb)
1100 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1101 BITMAP_XFREE (info->regnos_mentioned);
1102 BITMAP_XFREE (info->live_throughout);
1103 bb->aux = info->old_aux;
1104 free (info);
1108 /* Toplevel function for the first part of this file.
1109 Connect web parts, thereby implicitly building webs, and remember
1110 their conflicts. */
1112 static void
1113 build_web_parts_and_conflicts (struct df *df)
1115 struct df_link *link;
1116 struct curr_use use;
1117 basic_block bb;
1119 number_seen = xcalloc (get_max_uid (), sizeof (int));
1120 visit_trace = xcalloc (get_max_uid (), sizeof (visit_trace[0]));
1121 update_regnos_mentioned ();
1123 /* Here's the main loop.
1124 It goes through all insns, connects web parts along the way, notes
1125 conflicts between webparts, and remembers move instructions. */
1126 visited_pass = 0;
1127 for (use.regno = 0; use.regno < (unsigned int)max_regno; use.regno++)
1128 if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno])
1129 for (link = df->regs[use.regno].uses; link; link = link->next)
1130 if (link->ref)
1132 struct ref *ref = link->ref;
1133 rtx insn = DF_REF_INSN (ref);
1134 /* Only recheck marked or new uses, or uses from hardregs. */
1135 if (use.regno >= FIRST_PSEUDO_REGISTER
1136 && DF_REF_ID (ref) < last_use_id
1137 && !TEST_BIT (last_check_uses, DF_REF_ID (ref)))
1138 continue;
1139 use.wp = &web_parts[df->def_id + DF_REF_ID (ref)];
1140 use.x = DF_REF_REG (ref);
1141 use.live_over_abnormal = 0;
1142 use.undefined = rtx_to_undefined (use.x);
1143 visited_pass++;
1144 live_in (df, &use, insn);
1145 if (use.live_over_abnormal)
1146 SET_BIT (live_over_abnormal, DF_REF_ID (ref));
1149 dump_number_seen ();
1150 FOR_ALL_BB (bb)
1152 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1153 livethrough_conflicts_bb (bb);
1154 bitmap_zero (info->live_throughout);
1155 info->pass = 0;
1157 free (visit_trace);
1158 free (number_seen);
1161 /* Here we look per insn, for DF references being in uses _and_ defs.
1162 This means, in the RTL a (REG xx) expression was seen as a
1163 read/modify/write, as happens for (set (subreg:SI (reg:DI xx)) (...))
1164 e.g. Our code has created two webs for this, as it should. Unfortunately,
1165 as the REG reference appears only once in the RTL we can't color
1166 both webs differently (arguably this would also be wrong for a real
1167 read-mod-write instruction), so we must reconnect such webs. */
1169 static void
1170 connect_rmw_web_parts (struct df *df)
1172 unsigned int i;
1174 for (i = 0; i < df->use_id; i++)
1176 struct web_part *wp1 = &web_parts[df->def_id + i];
1177 rtx reg;
1178 struct df_link *link;
1179 if (!wp1->ref)
1180 continue;
1181 /* If it's an uninitialized web, we don't want to connect it to others,
1182 as the read cycle in read-mod-write probably had no effect. */
1183 if (find_web_part (wp1) >= &web_parts[df->def_id])
1184 continue;
1185 reg = DF_REF_REAL_REG (wp1->ref);
1186 link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref));
1187 for (; link; link = link->next)
1188 if (reg == DF_REF_REAL_REG (link->ref))
1190 struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)];
1191 union_web_parts (wp1, wp2);
1196 /* Deletes all hardregs from *S which are not allowed for MODE. */
1198 static void
1199 prune_hardregs_for_mode (HARD_REG_SET *s, enum machine_mode mode)
1201 AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
1204 /* Initialize the members of a web, which are deducible from REG. */
1206 static void
1207 init_one_web_common (struct web *web, rtx reg)
1209 if (!REG_P (reg))
1210 abort ();
1211 /* web->id isn't initialized here. */
1212 web->regno = REGNO (reg);
1213 web->orig_x = reg;
1214 if (!web->dlink)
1216 web->dlink = ra_calloc (sizeof (struct dlist));
1217 DLIST_WEB (web->dlink) = web;
1219 /* XXX
1220 the former (superunion) doesn't constrain the graph enough. E.g.
1221 on x86 QImode _requires_ QI_REGS, but as alternate class usually
1222 GENERAL_REGS is given. So the graph is not constrained enough,
1223 thinking it has more freedom than it really has, which leads
1224 to repeated spill attempts. OTOH the latter (only using preferred
1225 class) is too constrained, as normally (e.g. with all SImode
1226 pseudos), they can be allocated also in the alternate class.
1227 What we really want, are the _exact_ hard regs allowed, not
1228 just a class. Later. */
1229 /*web->regclass = reg_class_superunion
1230 [reg_preferred_class (web->regno)]
1231 [reg_alternate_class (web->regno)];*/
1232 /*web->regclass = reg_preferred_class (web->regno);*/
1233 web->regclass = reg_class_subunion
1234 [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];
1235 web->regclass = reg_preferred_class (web->regno);
1236 if (web->regno < FIRST_PSEUDO_REGISTER)
1238 web->color = web->regno;
1239 put_web (web, PRECOLORED);
1240 web->num_conflicts = UINT_MAX;
1241 web->add_hardregs = 0;
1242 CLEAR_HARD_REG_SET (web->usable_regs);
1243 SET_HARD_REG_BIT (web->usable_regs, web->regno);
1244 web->num_freedom = 1;
1246 else
1248 HARD_REG_SET alternate;
1249 web->color = -1;
1250 put_web (web, INITIAL);
1251 /* add_hardregs is wrong in multi-length classes, e.g.
1252 using a DFmode pseudo on x86 can result in class FLOAT_INT_REGS,
1253 where, if it finally is allocated to GENERAL_REGS it needs two,
1254 if allocated to FLOAT_REGS only one hardreg. XXX */
1255 web->add_hardregs =
1256 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
1257 web->num_conflicts = 0 * web->add_hardregs;
1258 COPY_HARD_REG_SET (web->usable_regs,
1259 reg_class_contents[reg_preferred_class (web->regno)]);
1260 COPY_HARD_REG_SET (alternate,
1261 reg_class_contents[reg_alternate_class (web->regno)]);
1262 IOR_HARD_REG_SET (web->usable_regs, alternate);
1263 /*IOR_HARD_REG_SET (web->usable_regs,
1264 reg_class_contents[reg_alternate_class
1265 (web->regno)]);*/
1266 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
1267 prune_hardregs_for_mode (&web->usable_regs,
1268 PSEUDO_REGNO_MODE (web->regno));
1269 #ifdef CANNOT_CHANGE_MODE_CLASS
1270 if (web->mode_changed)
1271 AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
1272 #endif
1273 web->num_freedom = hard_regs_count (web->usable_regs);
1274 web->num_freedom -= web->add_hardregs;
1275 if (!web->num_freedom)
1276 abort();
1278 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
1281 /* Initializes WEBs members from REG or zero them. */
1283 static void
1284 init_one_web (struct web *web, rtx reg)
1286 memset (web, 0, sizeof (struct web));
1287 init_one_web_common (web, reg);
1288 web->useless_conflicts = BITMAP_XMALLOC ();
1291 /* WEB is an old web, meaning it came from the last pass, and got a
1292 color. We want to remember some of its info, so zero only some
1293 members. */
1295 static void
1296 reinit_one_web (struct web *web, rtx reg)
1298 web->old_color = web->color + 1;
1299 init_one_web_common (web, reg);
1300 web->span_deaths = 0;
1301 web->spill_temp = 0;
1302 web->orig_spill_temp = 0;
1303 web->use_my_regs = 0;
1304 web->spill_cost = 0;
1305 web->was_spilled = 0;
1306 web->is_coalesced = 0;
1307 web->artificial = 0;
1308 web->live_over_abnormal = 0;
1309 web->mode_changed = 0;
1310 web->subreg_stripped = 0;
1311 web->move_related = 0;
1312 web->in_load = 0;
1313 web->target_of_spilled_move = 0;
1314 web->num_aliased = 0;
1315 if (web->type == PRECOLORED)
1317 web->num_defs = 0;
1318 web->num_uses = 0;
1319 web->orig_spill_cost = 0;
1321 CLEAR_HARD_REG_SET (web->bias_colors);
1322 CLEAR_HARD_REG_SET (web->prefer_colors);
1323 web->reg_rtx = NULL;
1324 web->stack_slot = NULL;
1325 web->pattern = NULL;
1326 web->alias = NULL;
1327 if (web->moves)
1328 abort ();
1329 if (!web->useless_conflicts)
1330 abort ();
1333 /* Inserts and returns a subweb corresponding to REG into WEB (which
1334 becomes its super web). It must not exist already. */
1336 static struct web *
1337 add_subweb (struct web *web, rtx reg)
1339 struct web *w;
1340 if (GET_CODE (reg) != SUBREG)
1341 abort ();
1342 w = xmalloc (sizeof (struct web));
1343 /* Copy most content from parent-web. */
1344 *w = *web;
1345 /* And initialize the private stuff. */
1346 w->orig_x = reg;
1347 w->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1;
1348 w->num_conflicts = 0 * w->add_hardregs;
1349 w->num_defs = 0;
1350 w->num_uses = 0;
1351 w->dlink = NULL;
1352 w->parent_web = web;
1353 w->subreg_next = web->subreg_next;
1354 web->subreg_next = w;
1355 return w;
1358 /* Similar to add_subweb(), but instead of relying on a given SUBREG,
1359 we have just a size and an offset of the subpart of the REG rtx.
1360 In difference to add_subweb() this marks the new subweb as artificial. */
1362 static struct web *
1363 add_subweb_2 (struct web *web, unsigned int size_word)
1365 /* To get a correct mode for the subreg to be produced, we don't want to
1366 simply do a mode_for_size() for the mode_class of the whole web.
1367 Suppose we deal with a CDImode web, but search for a 8 byte part.
1368 Now mode_for_size() would only search in the class MODE_COMPLEX_INT
1369 and would find CSImode which probably is not what we want. Instead
1370 we want DImode, which is in a completely other class. For this to work
1371 we instead first search the already existing subwebs, and take
1372 _their_ modeclasses as base for a search for ourselves. */
1373 rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x;
1374 unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT;
1375 enum machine_mode mode;
1376 mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
1377 if (mode == BLKmode)
1378 mode = mode_for_size (size, MODE_INT, 0);
1379 if (mode == BLKmode)
1380 abort ();
1381 web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
1382 BYTE_BEGIN (size_word)));
1383 web->artificial = 1;
1384 return web;
1387 /* Initialize all the web parts we are going to need. */
1389 static void
1390 init_web_parts (struct df *df)
1392 int regno;
1393 unsigned int no;
1394 num_webs = 0;
1395 for (no = 0; no < df->def_id; no++)
1397 if (df->defs[no])
1399 if (no < last_def_id && web_parts[no].ref != df->defs[no])
1400 abort ();
1401 web_parts[no].ref = df->defs[no];
1402 /* Uplink might be set from the last iteration. */
1403 if (!web_parts[no].uplink)
1404 num_webs++;
1406 else
1407 /* The last iteration might have left .ref set, while df_analyze()
1408 removed that ref (due to a removed copy insn) from the df->defs[]
1409 array. As we don't check for that in realloc_web_parts()
1410 we do that here. */
1411 web_parts[no].ref = NULL;
1413 for (no = 0; no < df->use_id; no++)
1415 if (df->uses[no])
1417 if (no < last_use_id
1418 && web_parts[no + df->def_id].ref != df->uses[no])
1419 abort ();
1420 web_parts[no + df->def_id].ref = df->uses[no];
1421 if (!web_parts[no + df->def_id].uplink)
1422 num_webs++;
1424 else
1425 web_parts[no + df->def_id].ref = NULL;
1428 /* We want to have only one web for each precolored register. */
1429 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1431 struct web_part *r1 = NULL;
1432 struct df_link *link;
1433 /* Here once was a test, if there is any DEF at all, and only then to
1434 merge all the parts. This was incorrect, we really also want to have
1435 only one web-part for hardregs, even if there is no explicit DEF. */
1436 /* Link together all defs... */
1437 for (link = df->regs[regno].defs; link; link = link->next)
1438 if (link->ref)
1440 struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)];
1441 if (!r1)
1442 r1 = r2;
1443 else
1444 r1 = union_web_parts (r1, r2);
1446 /* ... and all uses. */
1447 for (link = df->regs[regno].uses; link; link = link->next)
1448 if (link->ref)
1450 struct web_part *r2 = &web_parts[df->def_id
1451 + DF_REF_ID (link->ref)];
1452 if (!r1)
1453 r1 = r2;
1454 else
1455 r1 = union_web_parts (r1, r2);
1460 /* In case we want to remember the conflict list of a WEB, before adding
1461 new conflicts, we copy it here to orig_conflict_list. */
1463 static void
1464 copy_conflict_list (struct web *web)
1466 struct conflict_link *cl;
1467 if (web->orig_conflict_list || web->have_orig_conflicts)
1468 abort ();
1469 web->have_orig_conflicts = 1;
1470 for (cl = web->conflict_list; cl; cl = cl->next)
1472 struct conflict_link *ncl;
1473 ncl = ra_alloc (sizeof *ncl);
1474 ncl->t = cl->t;
1475 ncl->sub = NULL;
1476 ncl->next = web->orig_conflict_list;
1477 web->orig_conflict_list = ncl;
1478 if (cl->sub)
1480 struct sub_conflict *sl, *nsl;
1481 for (sl = cl->sub; sl; sl = sl->next)
1483 nsl = ra_alloc (sizeof *nsl);
1484 nsl->s = sl->s;
1485 nsl->t = sl->t;
1486 nsl->next = ncl->sub;
1487 ncl->sub = nsl;
1493 /* Possibly add an edge from web FROM to TO marking a conflict between
1494 those two. This is one half of marking a complete conflict, which notes
1495 in FROM, that TO is a conflict. Adding TO to FROM's conflicts might
1496 make other conflicts superfluous, because the current TO overlaps some web
1497 already being in conflict with FROM. In this case the smaller webs are
1498 deleted from the conflict list. Likewise if TO is overlapped by a web
1499 already in the list, it isn't added at all. Note, that this can only
1500 happen, if SUBREG webs are involved. */
1502 static void
1503 add_conflict_edge (struct web *from, struct web *to)
1505 if (from->type != PRECOLORED)
1507 struct web *pfrom = find_web_for_subweb (from);
1508 struct web *pto = find_web_for_subweb (to);
1509 struct sub_conflict *sl;
1510 struct conflict_link *cl = pfrom->conflict_list;
1511 int may_delete = 1;
1513 /* This can happen when subwebs of one web conflict with each
1514 other. In live_out_1() we created such conflicts between yet
1515 undefined webparts and defs of parts which didn't overlap with the
1516 undefined bits. Then later they nevertheless could have merged into
1517 one web, and then we land here. */
1518 if (pfrom == pto)
1519 return;
1520 if (remember_conflicts && !pfrom->have_orig_conflicts)
1521 copy_conflict_list (pfrom);
1522 if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
1524 cl = ra_alloc (sizeof (*cl));
1525 cl->t = pto;
1526 cl->sub = NULL;
1527 cl->next = pfrom->conflict_list;
1528 pfrom->conflict_list = cl;
1529 if (pto->type != SELECT && pto->type != COALESCED)
1530 pfrom->num_conflicts += 1 + pto->add_hardregs;
1531 SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id));
1532 may_delete = 0;
1534 else
1535 /* We don't need to test for cl==NULL, because at this point
1536 a cl with cl->t==pto is guaranteed to exist. */
1537 while (cl->t != pto)
1538 cl = cl->next;
1539 if (pfrom != from || pto != to)
1541 /* This is a subconflict which should be added.
1542 If we inserted cl in this invocation, we really need to add this
1543 subconflict. If we did _not_ add it here, we only add the
1544 subconflict, if cl already had subconflicts, because otherwise
1545 this indicates that the whole webs already conflict, which
1546 means we are not interested in this subconflict. */
1547 if (!may_delete || cl->sub != NULL)
1549 sl = ra_alloc (sizeof (*sl));
1550 sl->s = from;
1551 sl->t = to;
1552 sl->next = cl->sub;
1553 cl->sub = sl;
1556 else
1557 /* pfrom == from && pto == to means, that we are not interested
1558 anymore in the subconflict list for this pair, because anyway
1559 the whole webs conflict. */
1560 cl->sub = NULL;
1564 /* Record a conflict between two webs, if we haven't recorded it
1565 already. */
1567 void
1568 record_conflict (struct web *web1, struct web *web2)
1570 unsigned int id1 = web1->id, id2 = web2->id;
1571 unsigned int index = igraph_index (id1, id2);
1572 /* Trivial non-conflict or already recorded conflict. */
1573 if (web1 == web2 || TEST_BIT (igraph, index))
1574 return;
1575 if (id1 == id2)
1576 abort ();
1577 /* As fixed_regs are no targets for allocation, conflicts with them
1578 are pointless. */
1579 if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
1580 || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno]))
1581 return;
1582 /* Conflicts with hardregs, which are not even a candidate
1583 for this pseudo are also pointless. */
1584 if ((web1->type == PRECOLORED
1585 && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
1586 || (web2->type == PRECOLORED
1587 && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
1588 return;
1589 /* Similarly, if the sets of possible hardregs don't intersect. In this iteration
1590 those conflicts are useless (and would make num_conflicts wrong, because
1591 num_freedom is calculated from the set of possible hardregs).
1592 But in presence of spilling and incremental building of the graph we
1593 need to note all uses of webs conflicting with the spilled ones.
1594 Because the set of possible hardregs can change in the next round for
1595 spilled webs, we possibly have then conflicts with webs which would
1596 be excluded now (because then hardregs intersect). But we actually
1597 need to check those uses, and to get hold of them, we need to remember
1598 also webs conflicting with this one, although not conflicting in this
1599 round because of non-intersecting hardregs. */
1600 if (web1->type != PRECOLORED && web2->type != PRECOLORED
1601 && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs))
1603 struct web *p1 = find_web_for_subweb (web1);
1604 struct web *p2 = find_web_for_subweb (web2);
1605 /* We expect these to be rare enough to justify bitmaps. And because
1606 we have only a special use for it, we note only the superwebs. */
1607 bitmap_set_bit (p1->useless_conflicts, p2->id);
1608 bitmap_set_bit (p2->useless_conflicts, p1->id);
1609 return;
1611 SET_BIT (igraph, index);
1612 add_conflict_edge (web1, web2);
1613 add_conflict_edge (web2, web1);
1616 /* For each web W this produces the missing subwebs Wx, such that it's
1617 possible to exactly specify (W-Wy) for all already existing subwebs Wy. */
1619 static void
1620 build_inverse_webs (struct web *web)
1622 struct web *sweb = web->subreg_next;
1623 unsigned HOST_WIDE_INT undef;
1625 undef = rtx_to_undefined (web->orig_x);
1626 for (; sweb; sweb = sweb->subreg_next)
1627 /* Only create inverses of non-artificial webs. */
1628 if (!sweb->artificial)
1630 unsigned HOST_WIDE_INT bits;
1631 bits = undef & ~ rtx_to_undefined (sweb->orig_x);
1632 while (bits)
1634 unsigned int size_word = undef_to_size_word (web->orig_x, &bits);
1635 if (!find_subweb_2 (web, size_word))
1636 add_subweb_2 (web, size_word);
1641 /* Copies the content of WEB to a new one, and links it into WL.
1642 Used for consistency checking. */
1644 static void
1645 copy_web (struct web *web, struct web_link **wl)
1647 struct web *cweb = xmalloc (sizeof *cweb);
1648 struct web_link *link = ra_alloc (sizeof *link);
1649 link->next = *wl;
1650 *wl = link;
1651 link->web = cweb;
1652 *cweb = *web;
1655 /* Given a list of webs LINK, compare the content of the webs therein
1656 with the global webs of the same ID. For consistency checking. */
1658 static void
1659 compare_and_free_webs (struct web_link **link)
1661 struct web_link *wl;
1662 for (wl = *link; wl; wl = wl->next)
1664 struct web *web1 = wl->web;
1665 struct web *web2 = ID2WEB (web1->id);
1666 if (web1->regno != web2->regno
1667 || web1->mode_changed != web2->mode_changed
1668 || !rtx_equal_p (web1->orig_x, web2->orig_x)
1669 || web1->type != web2->type
1670 /* Only compare num_defs/num_uses with non-hardreg webs.
1671 E.g. the number of uses of the framepointer changes due to
1672 inserting spill code. */
1673 || (web1->type != PRECOLORED
1674 && (web1->num_uses != web2->num_uses
1675 || web1->num_defs != web2->num_defs))
1676 /* Similarly, if the framepointer was unreferenced originally
1677 but we added spills, these fields may not match. */
1678 || (web1->type != PRECOLORED
1679 && web1->crosses_call != web2->crosses_call)
1680 || (web1->type != PRECOLORED
1681 && web1->live_over_abnormal != web2->live_over_abnormal))
1682 abort ();
1683 if (web1->type != PRECOLORED)
1685 unsigned int i;
1686 for (i = 0; i < web1->num_defs; i++)
1687 if (web1->defs[i] != web2->defs[i])
1688 abort ();
1689 for (i = 0; i < web1->num_uses; i++)
1690 if (web1->uses[i] != web2->uses[i])
1691 abort ();
1693 if (web1->type == PRECOLORED)
1695 if (web1->defs)
1696 free (web1->defs);
1697 if (web1->uses)
1698 free (web1->uses);
1700 free (web1);
1702 *link = NULL;
1705 /* Setup and fill uses[] and defs[] arrays of the webs. */
1707 static void
1708 init_webs_defs_uses (void)
1710 struct dlist *d;
1711 for (d = WEBS(INITIAL); d; d = d->next)
1713 struct web *web = DLIST_WEB (d);
1714 unsigned int def_i, use_i;
1715 struct df_link *link;
1716 if (web->old_web)
1717 continue;
1718 if (web->type == PRECOLORED)
1720 web->num_defs = web->num_uses = 0;
1721 continue;
1723 if (web->num_defs)
1724 web->defs = xmalloc (web->num_defs * sizeof (web->defs[0]));
1725 if (web->num_uses)
1726 web->uses = xmalloc (web->num_uses * sizeof (web->uses[0]));
1727 def_i = use_i = 0;
1728 for (link = web->temp_refs; link; link = link->next)
1730 if (DF_REF_REG_DEF_P (link->ref))
1731 web->defs[def_i++] = link->ref;
1732 else
1733 web->uses[use_i++] = link->ref;
1735 web->temp_refs = NULL;
1736 if (def_i != web->num_defs || use_i != web->num_uses)
1737 abort ();
1741 /* Called by parts_to_webs(). This creates (or recreates) the webs (and
1742 subwebs) from web parts, gives them IDs (only to super webs), and sets
1743 up use2web and def2web arrays. */
1745 static unsigned int
1746 parts_to_webs_1 (struct df *df, struct web_link **copy_webs,
1747 struct df_link *all_refs)
1749 unsigned int i;
1750 unsigned int webnum;
1751 unsigned int def_id = df->def_id;
1752 unsigned int use_id = df->use_id;
1753 struct web_part *wp_first_use = &web_parts[def_id];
1755 /* For each root web part: create and initialize a new web,
1756 setup def2web[] and use2web[] for all defs and uses, and
1757 id2web for all new webs. */
1759 webnum = 0;
1760 for (i = 0; i < def_id + use_id; i++)
1762 struct web *subweb, *web = 0; /* Initialize web to silence warnings. */
1763 struct web_part *wp = &web_parts[i];
1764 struct ref *ref = wp->ref;
1765 unsigned int ref_id;
1766 rtx reg;
1767 if (!ref)
1768 continue;
1769 ref_id = i;
1770 if (i >= def_id)
1771 ref_id -= def_id;
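/* Note that defs and uses share one index space here: for i < def_id,
   i indexes a def, while for i >= def_id, ref_id == i - def_id is the
   index into use2web[] and live_over_abnormal. */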
1772 all_refs[i].ref = ref;
1773 reg = DF_REF_REG (ref);
1774 if (! wp->uplink)
1776 /* If we have a web part root, create a new web. */
1777 unsigned int newid = ~(unsigned)0;
1778 unsigned int old_web = 0;
1780 /* In the first pass, there are no old webs, so unconditionally
1781 allocate a new one. */
1782 if (ra_pass == 1)
1784 web = xmalloc (sizeof (struct web));
1785 newid = last_num_webs++;
1786 init_one_web (web, GET_CODE (reg) == SUBREG
1787 ? SUBREG_REG (reg) : reg);
1789 /* Otherwise, we look for an old web. */
1790 else
1792 /* Remember that use2web == def2web + def_id.
1793 Hence def2web[i] == use2web[i - def_id] for i >= def_id,
1794 so we only need to look into the def2web[] array.
1795 Try to look at the web which formerly belonged to this
1796 def (or use). */
1797 web = def2web[i];
1798 /* Or which belonged to this hardreg. */
1799 if (!web && DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER)
1800 web = hardreg2web[DF_REF_REGNO (ref)];
1801 if (web)
1803 /* If we found one, reuse it. */
1804 web = find_web_for_subweb (web);
1805 remove_list (web->dlink, &WEBS(INITIAL));
1806 old_web = 1;
1807 copy_web (web, copy_webs);
1809 else
1811 /* Otherwise use a new one. First from the free list. */
1812 if (WEBS(FREE))
1813 web = DLIST_WEB (pop_list (&WEBS(FREE)));
1814 else
1816 /* Else allocate a new one. */
1817 web = xmalloc (sizeof (struct web));
1818 newid = last_num_webs++;
1821 /* The id is zeroed in init_one_web(). */
1822 if (newid == ~(unsigned)0)
1823 newid = web->id;
1824 if (old_web)
1825 reinit_one_web (web, GET_CODE (reg) == SUBREG
1826 ? SUBREG_REG (reg) : reg);
1827 else
1828 init_one_web (web, GET_CODE (reg) == SUBREG
1829 ? SUBREG_REG (reg) : reg);
1830 web->old_web = (old_web && web->type != PRECOLORED) ? 1 : 0;
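/* Note that precolored (hardreg) webs keep old_web zero even when an
   existing web object was reused for them, so the incremental rebuild
   treats them as new webs (compare reset_conflicts() below). */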
1832 web->span_deaths = wp->spanned_deaths;
1833 web->crosses_call = wp->crosses_call;
1834 web->id = newid;
1835 web->temp_refs = NULL;
1836 webnum++;
1837 if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno])
1838 hardreg2web[web->regno] = web;
1839 else if (web->regno < FIRST_PSEUDO_REGISTER
1840 && hardreg2web[web->regno] != web)
1841 abort ();
1844 /* If this reference already had a web assigned, we are done.
1845 This test had better be equivalent to the web being an old web;
1846 otherwise something is screwed. (This is tested.) */
1847 if (def2web[i] != NULL)
1849 web = def2web[i];
1850 web = find_web_for_subweb (web);
1851 /* But if this ref includes a mode change, or was a use live
1852 over an abnormal call, set appropriate flags in the web. */
1853 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1854 && web->regno >= FIRST_PSEUDO_REGISTER)
1855 web->mode_changed = 1;
1856 if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
1857 && web->regno >= FIRST_PSEUDO_REGISTER)
1858 web->subreg_stripped = 1;
1859 if (i >= def_id
1860 && TEST_BIT (live_over_abnormal, ref_id))
1861 web->live_over_abnormal = 1;
1862 /* And check that it's not a newly allocated web. That would be
1863 an inconsistency. */
1864 if (!web->old_web || web->type == PRECOLORED)
1865 abort ();
1866 continue;
1868 /* In case this was not a web part root, we need to initialize WEB
1869 from the ref2web array belonging to the root. */
1870 if (wp->uplink)
1872 struct web_part *rwp = find_web_part (wp);
1873 unsigned int j = DF_REF_ID (rwp->ref);
1874 if (rwp < wp_first_use)
1875 web = def2web[j];
1876 else
1877 web = use2web[j];
1878 web = find_web_for_subweb (web);
1881 /* Remember all references for a web in a single linked list. */
1882 all_refs[i].next = web->temp_refs;
1883 web->temp_refs = &all_refs[i];
1885 /* And the test that, if def2web[i] was NULL above, we are _not_
1886 an old web. */
1887 if (web->old_web && web->type != PRECOLORED)
1888 abort ();
1890 /* Possibly create a subweb, if this ref was a subreg. */
1891 if (GET_CODE (reg) == SUBREG)
1893 subweb = find_subweb (web, reg);
1894 if (!subweb)
1896 subweb = add_subweb (web, reg);
1897 if (web->old_web)
1898 abort ();
1901 else
1902 subweb = web;
1904 /* And check whether the ref involves an invalid mode change. */
1905 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1906 && web->regno >= FIRST_PSEUDO_REGISTER)
1907 web->mode_changed = 1;
1908 if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
1909 && web->regno >= FIRST_PSEUDO_REGISTER)
1910 web->subreg_stripped = 1;
1912 /* Setup def2web, or use2web, and increment num_defs or num_uses. */
1913 if (i < def_id)
1915 /* Some sanity checks. */
1916 if (ra_pass > 1)
1918 struct web *compare = def2web[i];
1919 if (i < last_def_id)
1921 if (web->old_web && compare != subweb)
1922 abort ();
1924 if (!web->old_web && compare)
1925 abort ();
1926 if (compare && compare != subweb)
1927 abort ();
1929 def2web[i] = subweb;
1930 web->num_defs++;
1932 else
1934 if (ra_pass > 1)
1936 struct web *compare = use2web[ref_id];
1937 if (ref_id < last_use_id)
1939 if (web->old_web && compare != subweb)
1940 abort ();
1942 if (!web->old_web && compare)
1943 abort ();
1944 if (compare && compare != subweb)
1945 abort ();
1947 use2web[ref_id] = subweb;
1948 web->num_uses++;
1949 if (TEST_BIT (live_over_abnormal, ref_id))
1950 web->live_over_abnormal = 1;
1954 /* We had better now have exactly as many webs as we had web part roots. */
1955 if (webnum != num_webs)
1956 abort ();
1958 return webnum;
1961 /* This builds full webs out of web parts, without relating them to each
1962 other (i.e. without creating the conflict edges). */
1964 static void
1965 parts_to_webs (struct df *df)
1967 unsigned int i;
1968 unsigned int webnum;
1969 struct web_link *copy_webs = NULL;
1970 struct dlist *d;
1971 struct df_link *all_refs;
1972 num_subwebs = 0;
1974 /* First build webs and ordinary subwebs. */
1975 all_refs = xcalloc (df->def_id + df->use_id, sizeof (all_refs[0]));
1976 webnum = parts_to_webs_1 (df, &copy_webs, all_refs);
1978 /* Setup the webs for hardregs which are still missing (weren't
1979 mentioned in the code). */
1980 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1981 if (!hardreg2web[i])
1983 struct web *web = xmalloc (sizeof (struct web));
1984 init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
1985 web->id = last_num_webs++;
1986 hardreg2web[web->regno] = web;
1988 num_webs = last_num_webs;
1990 /* Now create all artificial subwebs, i.e. those which do
1991 not correspond to a real subreg in the current function's RTL, but
1992 which nevertheless are targets of a conflict.
1993 XXX we need to merge this loop with the one above, which means we need
1994 a way to later override the artificiality. Beware: currently
1995 add_subweb_2() relies on the existence of normal subwebs for deducing
1996 a sane mode to use for the artificial subwebs. */
1997 for (i = 0; i < df->def_id + df->use_id; i++)
1999 struct web_part *wp = &web_parts[i];
2000 struct tagged_conflict *cl;
2001 struct web *web;
2002 if (wp->uplink || !wp->ref)
2004 if (wp->sub_conflicts)
2005 abort ();
2006 continue;
2008 web = def2web[i];
2009 web = find_web_for_subweb (web);
2010 for (cl = wp->sub_conflicts; cl; cl = cl->next)
2011 if (!find_subweb_2 (web, cl->size_word))
2012 add_subweb_2 (web, cl->size_word);
2015 /* And now create artificial subwebs needed for representing the inverse
2016 of some subwebs. This also gives IDs to all subwebs. */
2017 webnum = last_num_webs;
2018 for (d = WEBS(INITIAL); d; d = d->next)
2020 struct web *web = DLIST_WEB (d);
2021 if (web->subreg_next)
2023 struct web *sweb;
2024 build_inverse_webs (web);
2025 for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next)
2026 sweb->id = webnum++;
2030 /* Now that everyone has an ID, we can setup the id2web array. */
2031 id2web = xcalloc (webnum, sizeof (id2web[0]));
2032 for (d = WEBS(INITIAL); d; d = d->next)
2034 struct web *web = DLIST_WEB (d);
2035 ID2WEB (web->id) = web;
2036 for (web = web->subreg_next; web; web = web->subreg_next)
2037 ID2WEB (web->id) = web;
2039 num_subwebs = webnum - last_num_webs;
2040 num_allwebs = num_webs + num_subwebs;
2041 num_webs += num_subwebs;
2043 /* Allocate and clear the conflict graph bitmaps. */
2044 igraph = sbitmap_alloc (num_webs * num_webs / 2);
2045 sup_igraph = sbitmap_alloc (num_webs * num_webs);
2046 sbitmap_zero (igraph);
2047 sbitmap_zero (sup_igraph);
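/* sup_igraph is indexed as a full num_webs x num_webs matrix
   (id1 * num_webs + id2, see e.g. reset_conflicts()), whereas igraph needs
   only about half as many bits, because igraph_index() (a macro in ra.h)
   maps the unordered pair of web IDs to a single triangular index. */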
2049 /* Distribute the references to their webs. */
2050 init_webs_defs_uses ();
2051 /* And do some sanity checks to verify that the old webs and those
2052 recreated from the web parts really are the same. */
2053 compare_and_free_webs (&copy_webs);
2054 free (all_refs);
2057 /* This deletes all conflicts to and from webs which need to be renewed
2058 in this pass of the allocator, i.e. those which were spilled in the
2059 last pass. Furthermore it also rebuilds the bitmaps for the remaining
2060 conflicts. */
2062 static void
2063 reset_conflicts (void)
2065 unsigned int i;
2066 bitmap newwebs = BITMAP_XMALLOC ();
2067 for (i = 0; i < num_webs - num_subwebs; i++)
2069 struct web *web = ID2WEB (i);
2070 /* Hardreg webs and non-old webs are new webs (which
2071 need rebuilding). */
2072 if (web->type == PRECOLORED || !web->old_web)
2073 bitmap_set_bit (newwebs, web->id);
2076 for (i = 0; i < num_webs - num_subwebs; i++)
2078 struct web *web = ID2WEB (i);
2079 struct conflict_link *cl;
2080 struct conflict_link **pcl;
2081 pcl = &(web->conflict_list);
2083 /* First restore the conflict list to be like it was before
2084 coalescing. */
2085 if (web->have_orig_conflicts)
2087 web->conflict_list = web->orig_conflict_list;
2088 web->orig_conflict_list = NULL;
2090 if (web->orig_conflict_list)
2091 abort ();
2093 /* New non-precolored webs have no conflict list. */
2094 if (web->type != PRECOLORED && !web->old_web)
2096 *pcl = NULL;
2097 /* Useless conflicts will be rebuilt completely. But check
2098 for cleanliness, as the web might have come from the
2099 free list. */
2100 if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
2101 abort ();
2103 else
2105 /* Useless conflicts with new webs will be rebuilt if they
2106 are still there. */
2107 bitmap_operation (web->useless_conflicts, web->useless_conflicts,
2108 newwebs, BITMAP_AND_COMPL);
2109 /* Go through all conflicts, and retain those to old webs. */
2110 for (cl = web->conflict_list; cl; cl = cl->next)
2112 if (cl->t->old_web || cl->t->type == PRECOLORED)
2114 *pcl = cl;
2115 pcl = &(cl->next);
2117 /* Also restore the entries in the igraph bitmaps. */
2118 web->num_conflicts += 1 + cl->t->add_hardregs;
2119 SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id));
2120 /* No subconflicts means the full webs conflict. */
2121 if (!cl->sub)
2122 SET_BIT (igraph, igraph_index (web->id, cl->t->id));
2123 else
2124 /* Else only the parts in cl->sub must be in the
2125 bitmap. */
2127 struct sub_conflict *sl;
2128 for (sl = cl->sub; sl; sl = sl->next)
2129 SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2133 *pcl = NULL;
2135 web->have_orig_conflicts = 0;
2137 BITMAP_XFREE (newwebs);
2140 /* For each web check its num_conflicts member against that
2141 number, as calculated from scratch from all neighbors. */
2143 #if 0
2144 static void
2145 check_conflict_numbers (void)
2147 unsigned int i;
2148 for (i = 0; i < num_webs; i++)
2150 struct web *web = ID2WEB (i);
2151 int new_conf = 0 * web->add_hardregs;
2152 struct conflict_link *cl;
2153 for (cl = web->conflict_list; cl; cl = cl->next)
2154 if (cl->t->type != SELECT && cl->t->type != COALESCED)
2155 new_conf += 1 + cl->t->add_hardregs;
2156 if (web->type != PRECOLORED && new_conf != web->num_conflicts)
2157 abort ();
2160 #endif
2162 /* Convert the conflicts between web parts to conflicts between full webs.
2164 This can't be done in parts_to_webs(), because for recording conflicts
2165 between webs we need to know their final usable_regs set, which is used
2166 to discard non-conflicts (between webs having no hard reg in common).
2167 But this is set for spill temporaries only after the webs themselves are
2168 built. Until then the usable_regs set is based on the pseudo regno used
2169 in this web, which may contain far fewer registers than later determined.
2170 This would result in us losing conflicts (due to record_conflict()
2171 thinking that a web can only be allocated to the current usable_regs,
2172 whereas later this is extended), leading to colorings where some regs which
2173 in reality conflict get the same color. */
2175 static void
2176 conflicts_between_webs (struct df *df)
2178 unsigned int i;
2179 struct dlist *d;
2180 bitmap ignore_defs = BITMAP_XMALLOC ();
2181 unsigned int have_ignored;
2182 unsigned int *pass_cache = xcalloc (num_webs, sizeof (int));
2183 unsigned int pass = 0;
2185 if (ra_pass > 1)
2186 reset_conflicts ();
2188 /* It is possible that some defs I are still noted in the conflict bitmaps
2189 which have web_parts[I].ref being NULL. This can happen when, from the
2190 last iteration, the conflict bitmap for this part wasn't deleted, but a
2191 conflicting move insn was removed. Its DEF is still in the conflict
2192 bitmap, but it doesn't exist anymore in df->defs. To avoid checking
2193 this in the tight loop below, we instead remember their IDs in a
2194 bitmap, and loop only over IDs which are not in it. */
2195 for (i = 0; i < df->def_id; i++)
2196 if (web_parts[i].ref == NULL)
2197 bitmap_set_bit (ignore_defs, i);
2198 have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0);
2200 /* Now record all conflicts between webs. Note that we only check
2201 the conflict bitmaps of all defs. Conflict bitmaps are only in
2202 webpart roots. If they are in uses, those uses are roots, which
2203 means that this is an uninitialized web, whose conflicts
2204 don't matter. Nevertheless for hardregs we also need to check uses.
2205 E.g. hardregs used for argument passing have no DEF in the RTL,
2206 but if they have uses, they indeed conflict with all DEFs they
2207 overlap. */
2208 for (i = 0; i < df->def_id + df->use_id; i++)
2210 struct tagged_conflict *cl = web_parts[i].sub_conflicts;
2211 struct web *supweb1;
2212 if (!cl
2213 || (i >= df->def_id
2214 && DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER))
2215 continue;
2216 supweb1 = def2web[i];
2217 supweb1 = find_web_for_subweb (supweb1);
2218 for (; cl; cl = cl->next)
2219 if (cl->conflicts)
2221 int j;
2222 struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
2223 if (have_ignored)
2224 bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
2225 BITMAP_AND_COMPL);
2226 /* We reduce the number of calls to record_conflict() with this
2227 pass thing. record_conflict() itself also has some early-out
2228 optimizations, but here we can use the special properties of
2229 the loop (constant web1) to reduce that even more.
2230 We once used an sbitmap of already handled web indices,
2231 but sbitmaps are slow to clear and bitmaps are slow to
2232 set/test. The current approach needs more memory, but
2233 locality is large. */
2234 pass++;
2236 /* Note that there are only defs in the conflicts bitset. */
2237 EXECUTE_IF_SET_IN_BITMAP (
2238 cl->conflicts, 0, j,
2240 struct web *web2 = def2web[j];
2241 unsigned int id2 = web2->id;
2242 if (pass_cache[id2] != pass)
2244 pass_cache[id2] = pass;
2245 record_conflict (web1, web2);
2251 free (pass_cache);
2252 BITMAP_XFREE (ignore_defs);
2254 for (d = WEBS(INITIAL); d; d = d->next)
2256 struct web *web = DLIST_WEB (d);
2257 int j;
2259 if (web->crosses_call)
2260 for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
2261 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, j))
2262 record_conflict (web, hardreg2web[j]);
2264 #ifdef STACK_REGS
2265 /* Pseudos can't go in stack regs if they are live at the beginning of
2266 a block that is reached by an abnormal edge. */
2267 if (web->live_over_abnormal)
2268 for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
2269 record_conflict (web, hardreg2web[j]);
2270 #endif
2274 /* Remember that a web was spilled, and change some characteristics
2275 accordingly. */
2277 static void
2278 remember_web_was_spilled (struct web *web)
2280 int i;
2281 unsigned int found_size = 0;
2282 int adjust;
2283 web->spill_temp = 1;
2285 /* From now on don't use reg_pref/alt_class (regno) anymore for
2286 this web, but instead usable_regs. We can't use spill_temp for
2287 this, as it might get reset later, when we are coalesced to a
2288 non-spill-temp. In that case we still want to use usable_regs. */
2289 web->use_my_regs = 1;
2291 /* We don't constrain spill temporaries in any way for now.
2292 It's sometimes wrong to have the same constraints or
2293 preferences as the original pseudo, esp. if they were very narrow.
2294 (E.g. there once was a reg wanting class AREG (only one register)
2295 without an alternative class. As long as the spill-temps for
2296 this pseudo also had the same constraints, it was spilled over and over.)
2297 Ideally we want some constraints also on spill-temps: because they are
2298 not only loaded/stored, but also worked with, any constraints from insn
2299 alternatives need applying. Currently this is dealt with by reload, as
2300 are many other things, but at some point we want to integrate that
2301 functionality into the allocator. */
2302 if (web->regno >= max_normal_pseudo)
2304 COPY_HARD_REG_SET (web->usable_regs,
2305 reg_class_contents[reg_preferred_class (web->regno)]);
2306 IOR_HARD_REG_SET (web->usable_regs,
2307 reg_class_contents[reg_alternate_class (web->regno)]);
2309 else
2310 COPY_HARD_REG_SET (web->usable_regs,
2311 reg_class_contents[(int) GENERAL_REGS]);
2312 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
2313 prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
2314 #ifdef CANNOT_CHANGE_MODE_CLASS
2315 if (web->mode_changed)
2316 AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
2317 #endif
2318 web->num_freedom = hard_regs_count (web->usable_regs);
2319 if (!web->num_freedom)
2320 abort();
2321 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
2322 /* Now look for a class, which is subset of our constraints, to
2323 setup add_hardregs, and regclass for debug output. */
2324 web->regclass = NO_REGS;
2325 for (i = (int) ALL_REGS - 1; i > 0; i--)
2327 unsigned int size;
2328 HARD_REG_SET test;
2329 COPY_HARD_REG_SET (test, reg_class_contents[i]);
2330 AND_COMPL_HARD_REG_SET (test, never_use_colors);
2331 GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found);
2332 continue;
2333 found:
2334 /* Measure the actual number of bits which really are overlapping
2335 the target regset, not just the reg_class_size. */
2336 size = hard_regs_count (test);
2337 if (found_size < size)
2339 web->regclass = (enum reg_class) i;
2340 found_size = size;
2344 adjust = 0 * web->add_hardregs;
2345 web->add_hardregs =
2346 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
2347 web->num_freedom -= web->add_hardregs;
2348 if (!web->num_freedom)
2349 abort();
2350 adjust -= 0 * web->add_hardregs;
2351 web->num_conflicts -= adjust;
2354 /* Look at each web to see if it is used as a spill web, or better said,
2355 if it will be spillable in this pass. */
2357 static void
2358 detect_spill_temps (void)
2360 struct dlist *d;
2361 bitmap already = BITMAP_XMALLOC ();
2363 /* Detect webs used for spill temporaries. */
2364 for (d = WEBS(INITIAL); d; d = d->next)
2366 struct web *web = DLIST_WEB (d);
2368 /* Below only the detection of spill temporaries. We never spill
2369 precolored webs, so those can't be spill temporaries. The code above
2370 (remember_web_was_spilled) can't currently cope with hardregs
2371 anyway. */
2372 if (web->regno < FIRST_PSEUDO_REGISTER)
2373 continue;
2374 /* Uninitialized webs can't be spill-temporaries. */
2375 if (web->num_defs == 0)
2376 continue;
2378 /* A web with only defs and no uses can't be spilled. Nevertheless
2379 it must get a color, as it takes away a register from all webs
2380 live at these defs. So we make it a short web. */
2381 if (web->num_uses == 0)
2382 web->spill_temp = 3;
2383 /* A web which was spilled last time, but for which no insns were
2384 emitted (can happen with IR spilling, which sometimes ignores
2385 all deaths). */
2386 else if (web->changed)
2387 web->spill_temp = 1;
2388 /* A spill temporary has one def, one or more uses, all uses
2389 are in one insn, and either the def or use insn was inserted
2390 by the allocator. */
2391 /* XXX not correct currently. There might also be spill temps
2392 involving more than one def. Usually that's an additional
2393 clobber in the using instruction. We might also constrain
2394 ourselves to that, instead of, as currently, marking all
2395 webs involving any spill insns at all. */
2396 else
2398 unsigned int i;
2399 int spill_involved = 0;
2400 for (i = 0; i < web->num_uses && !spill_involved; i++)
2401 if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid)
2402 spill_involved = 1;
2403 for (i = 0; i < web->num_defs && !spill_involved; i++)
2404 if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid)
2405 spill_involved = 1;
2407 if (spill_involved/* && ra_pass > 2*/)
2409 int num_deaths = web->span_deaths;
2410 /* Mark webs involving at least one spill insn as
2411 spill temps. */
2412 remember_web_was_spilled (web);
2413 /* Search for insns which define and use the web in question
2414 at the same time, i.e. look for rmw insns. If these insns
2415 are also deaths of other webs they might have been counted
2416 as such into web->span_deaths. But because of the rmw nature
2417 of this insn, it is not a point where a load/reload could be
2418 placed successfully (it would still conflict with the
2419 dead web), so reduce the number of spanned deaths by those
2420 insns. Note that sometimes such deaths are _not_ counted,
2421 so negative values can result. */
2422 bitmap_zero (already);
2423 for (i = 0; i < web->num_defs; i++)
2425 rtx insn = web->defs[i]->insn;
2426 if (TEST_BIT (insns_with_deaths, INSN_UID (insn))
2427 && !bitmap_bit_p (already, INSN_UID (insn)))
2429 unsigned int j;
2430 bitmap_set_bit (already, INSN_UID (insn));
2431 /* Only decrement it once for each insn. */
2432 for (j = 0; j < web->num_uses; j++)
2433 if (web->uses[j]->insn == insn)
2435 num_deaths--;
2436 break;
2440 /* But mark them specially if they could possibly be spilled,
2441 either because they cross some deaths (without the above
2442 mentioned ones) or calls. */
2443 if (web->crosses_call || num_deaths > 0)
2444 web->spill_temp = 1 * 2;
2446 /* A web spanning no deaths can't be spilled either. No loads
2447 would be created for it, ergo no defs. So the insns wouldn't
2448 change, making the graph no easier to color. Make this also
2449 a short web. Don't do this if it crosses calls, as these are
2450 also points of reloads. */
2451 else if (web->span_deaths == 0 && !web->crosses_call)
2452 web->spill_temp = 3;
2454 web->orig_spill_temp = web->spill_temp;
2456 BITMAP_XFREE (already);
2459 /* Returns nonzero if the rtx MEM refers somehow to a stack location. */
2461 int
2462 memref_is_stack_slot (rtx mem)
2464 rtx ad = XEXP (mem, 0);
2465 rtx x;
2466 if (GET_CODE (ad) != PLUS || GET_CODE (XEXP (ad, 1)) != CONST_INT)
2467 return 0;
2468 x = XEXP (ad, 0);
2469 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2470 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
2471 || x == stack_pointer_rtx)
2472 return 1;
2473 return 0;
2476 /* Returns nonzero if rtx X somewhere contains any pseudo register. */
2478 static int
2479 contains_pseudo (rtx x)
2481 const char *fmt;
2482 int i;
2483 if (GET_CODE (x) == SUBREG)
2484 x = SUBREG_REG (x);
2485 if (REG_P (x))
2487 if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
2488 return 1;
2489 else
2490 return 0;
2493 fmt = GET_RTX_FORMAT (GET_CODE (x));
2494 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2495 if (fmt[i] == 'e')
2497 if (contains_pseudo (XEXP (x, i)))
2498 return 1;
2500 else if (fmt[i] == 'E')
2502 int j;
2503 for (j = 0; j < XVECLEN (x, i); j++)
2504 if (contains_pseudo (XVECEXP (x, i, j)))
2505 return 1;
2507 return 0;
2510 /* Returns nonzero if we are able to rematerialize something with
2511 value X. If it's not a general operand, we test if we can produce
2512 a valid insn which sets a pseudo to that value, and that insn doesn't
2513 clobber anything. */
2515 static GTY(()) rtx remat_test_insn;
2516 static int
2517 want_to_remat (rtx x)
2519 int num_clobbers = 0;
2520 int icode;
2522 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
2523 if (general_operand (x, GET_MODE (x)))
2524 return 1;
2526 /* Otherwise, check if we can make a valid insn from it. First initialize
2527 our test insn if we haven't already. */
2528 if (remat_test_insn == 0)
2530 remat_test_insn
2531 = make_insn_raw (gen_rtx_SET (VOIDmode,
2532 gen_rtx_REG (word_mode,
2533 FIRST_PSEUDO_REGISTER * 2),
2534 const0_rtx));
2535 NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0;
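/* The test insn built above is just (set (reg:word_mode R) (const_int 0))
   with R == FIRST_PSEUDO_REGISTER * 2, i.e. a register number that cannot
   be a real hardreg; its destination mode and source are overwritten below
   for every value we ask recog() about. */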
2538 /* Now make an insn like the one we would make when rematerializing
2539 the value X and see if valid. */
2540 PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x));
2541 SET_SRC (PATTERN (remat_test_insn)) = x;
2542 /* XXX For now we don't allow any clobbers to be added, not just no
2543 hardreg clobbers. */
2544 return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn,
2545 &num_clobbers)) >= 0
2546 && (num_clobbers == 0
2547 /*|| ! added_clobbers_hard_reg_p (icode)*/));
2550 /* Look at all webs to see if they perhaps are rematerializable.
2551 They are, if all their defs are simple sets to the same value,
2552 and that value is simple enough, and want_to_remat() holds for it. */
2554 static void
2555 detect_remat_webs (void)
2557 struct dlist *d;
2558 for (d = WEBS(INITIAL); d; d = d->next)
2560 struct web *web = DLIST_WEB (d);
2561 unsigned int i;
2562 rtx pat = NULL_RTX;
2563 /* Hardregs and useless webs aren't spilled -> no remat necessary.
2564 Defless webs obviously also can't be rematerialized. */
2565 if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs
2566 || !web->num_uses)
2567 continue;
2568 for (i = 0; i < web->num_defs; i++)
2570 rtx insn;
2571 rtx set = single_set (insn = DF_REF_INSN (web->defs[i]));
2572 rtx src;
2573 if (!set)
2574 break;
2575 src = SET_SRC (set);
2576 /* When only subregs of the web are set it isn't easily
2577 rematerializable. */
2578 if (!rtx_equal_p (SET_DEST (set), web->orig_x))
2579 break;
2580 /* If we already have a pattern it must be equal to the current. */
2581 if (pat && !rtx_equal_p (pat, src))
2582 break;
2583 /* Don't do the expensive checks multiple times. */
2584 if (pat)
2585 continue;
2586 /* For now we allow only constant sources. */
2587 if ((CONSTANT_P (src)
2588 /* If the whole thing is stable already, it is a source for
2589 remat, no matter how complicated (probably all needed
2590 resources for it are live everywhere, and don't take
2591 additional register resources). */
2592 /* XXX Currently we can't use patterns which contain
2593 pseudos, _even_ if they are stable. The code simply isn't
2594 prepared for that. All those operands can't be spilled (or
2595 the dependent remat webs are not remat anymore), so they
2596 would be oldwebs in the next iteration. But currently
2597 oldwebs can't have their references changed. The
2598 incremental machinery barfs on that. */
2599 || (!rtx_unstable_p (src) && !contains_pseudo (src))
2600 /* Additionally, memrefs to stack-slots are also useful, when
2601 we created them ourselves. They might not have their
2602 unchanging flag set, but nevertheless they are stable across
2603 the lifetime in question. */
2604 || (MEM_P (src)
2605 && INSN_UID (insn) >= orig_max_uid
2606 && memref_is_stack_slot (src)))
2607 /* And we must be able to construct an insn without
2608 side-effects to actually load that value into a reg. */
2609 && want_to_remat (src))
2610 pat = src;
2611 else
2612 break;
2614 if (pat && i == web->num_defs)
2615 web->pattern = pat;
2619 /* Determine the spill costs of all webs. */
2621 static void
2622 determine_web_costs (void)
2624 struct dlist *d;
2625 for (d = WEBS(INITIAL); d; d = d->next)
2627 unsigned int i, num_loads;
2628 int load_cost, store_cost;
2629 unsigned HOST_WIDE_INT w;
2630 struct web *web = DLIST_WEB (d);
2631 if (web->type == PRECOLORED)
2632 continue;
2633 /* Get costs for one load/store. Note that we offset them by 1,
2634 because some patterns have a zero rtx_cost(), but we of course
2635 still need the actual load/store insns. With zero all those
2636 webs would be the same, no matter how often and where
2637 they are used. */
2638 if (web->pattern)
2640 /* This web is rematerializable. Beware, we set store_cost to
2641 zero, optimistically assuming that we indeed don't emit any
2642 stores in the spill-code addition. This might be wrong if
2643 at the point of the load not all needed resources are
2644 available, in which case we emit a stack-based load, for
2645 which we in turn need the according stores. */
2646 load_cost = 1 + rtx_cost (web->pattern, 0);
2647 store_cost = 0;
2649 else
2651 load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2652 web->regclass, 1);
2653 store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2654 web->regclass, 0);
2656 /* We create only loads at deaths, whose number is in span_deaths. */
2657 num_loads = MIN (web->span_deaths, web->num_uses);
2658 for (w = 0, i = 0; i < web->num_uses; i++)
2659 w += DF_REF_BB (web->uses[i])->frequency + 1;
2660 if (num_loads < web->num_uses)
2661 w = (w * num_loads + web->num_uses - 1) / web->num_uses;
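/* The line above computes ceil (w * num_loads / num_uses), i.e. the
   frequency-weighted use cost scaled by the fraction of uses that actually
   need a load; e.g. with num_uses == 4, num_loads == 2 and w == 10 the
   scaled weight becomes (10*2 + 3) / 4 == 5. */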
2662 web->spill_cost = w * load_cost;
2663 if (store_cost)
2665 for (w = 0, i = 0; i < web->num_defs; i++)
2666 w += DF_REF_BB (web->defs[i])->frequency + 1;
2667 web->spill_cost += w * store_cost;
2669 web->orig_spill_cost = web->spill_cost;
2673 /* Detect webs which are set in a conditional jump insn (possibly a
2674 decrement-and-branch type of insn), and mark them not to be
2675 spillable. The stores for them would need to be placed on edges,
2676 which destroys the CFG. (At some point we want to deal with that. XXX) */
2678 static void
2679 detect_webs_set_in_cond_jump (void)
2681 basic_block bb;
2682 FOR_EACH_BB (bb)
2683 if (JUMP_P (BB_END (bb)))
2685 struct df_link *link;
2686 for (link = DF_INSN_DEFS (df, BB_END (bb)); link; link = link->next)
2687 if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
2689 struct web *web = def2web[DF_REF_ID (link->ref)];
2690 web->orig_spill_temp = web->spill_temp = 3;
2695 /* Second top-level function of this file.
2696 Converts the connected web parts to full webs. This means it allocates
2697 all webs, and initializes all fields, including detecting spill
2698 temporaries. It does not distribute moves to their corresponding webs,
2699 though. */
2701 static void
2702 make_webs (struct df *df)
2704 /* First build all the webs themselves. They are not related to
2705 each other yet. */
2706 parts_to_webs (df);
2707 /* Now detect spill temporaries to initialize their usable_regs set. */
2708 detect_spill_temps ();
2709 detect_webs_set_in_cond_jump ();
2710 /* And finally relate them to each other, meaning to record all possible
2711 conflicts between webs (see the comment there). */
2712 conflicts_between_webs (df);
2713 detect_remat_webs ();
2714 determine_web_costs ();
2717 /* Distribute moves to the corresponding webs. */
2719 static void
2720 moves_to_webs (struct df *df)
2722 struct df_link *link;
2723 struct move_list *ml;
2725 /* Distribute all moves to their corresponding webs, making sure
2726 each move is in a web at most one time (multiple occurrences can happen
2727 with some strange insns). */
2728 for (ml = wl_moves; ml; ml = ml->next)
2730 struct move *m = ml->move;
2731 struct web *web;
2732 struct move_list *newml;
2733 if (!m)
2734 continue;
2735 m->type = WORKLIST;
2736 m->dlink = NULL;
2737 /* Multiple defs/uses can happen in moves involving hard-regs in
2738 a wider mode. For those df.* creates use/def references for each
2739 real hard-reg involved. For coalescing we are interested in
2740 the smallest numbered hard-reg. */
2741 for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next)
2742 if (link->ref)
2744 web = def2web[DF_REF_ID (link->ref)];
2745 web = find_web_for_subweb (web);
2746 if (!m->target_web || web->regno < m->target_web->regno)
2747 m->target_web = web;
2749 for (link = DF_INSN_USES (df, m->insn); link; link = link->next)
2750 if (link->ref)
2752 web = use2web[DF_REF_ID (link->ref)];
2753 web = find_web_for_subweb (web);
2754 if (!m->source_web || web->regno < m->source_web->regno)
2755 m->source_web = web;
2757 if (m->source_web && m->target_web
2758 /* If the usable_regs don't intersect we can't coalesce the two
2759 webs anyway, as this is no simple copy insn (it might even
2760 need an intermediate stack temp to execute this "copy" insn). */
2761 && hard_regs_intersect_p (&m->source_web->usable_regs,
2762 &m->target_web->usable_regs))
2764 if (!flag_ra_optimistic_coalescing)
2766 struct move_list *test = m->source_web->moves;
2767 for (; test && test->move != m; test = test->next);
2768 if (! test)
2770 newml = ra_alloc (sizeof (struct move_list));
2771 newml->move = m;
2772 newml->next = m->source_web->moves;
2773 m->source_web->moves = newml;
2775 test = m->target_web->moves;
2776 for (; test && test->move != m; test = test->next);
2777 if (! test)
2779 newml = ra_alloc (sizeof (struct move_list));
2780 newml->move = m;
2781 newml->next = m->target_web->moves;
2782 m->target_web->moves = newml;
2786 else
2787 /* Delete this move. */
2788 ml->move = NULL;
2792 /* Handle tricky asm insns.
2793 Supposed to create conflicts to hardregs which aren't allowed in
2794 the constraints. Doesn't actually do that, as it might confuse
2795 and constrain the allocator too much. */
2797 static void
2798 handle_asm_insn (struct df *df, rtx insn)
2800 const char *constraints[MAX_RECOG_OPERANDS];
2801 enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
2802 int i, noperands, in_output;
2803 HARD_REG_SET clobbered, allowed, conflict;
2804 rtx pat;
2805 if (! INSN_P (insn)
2806 || (noperands = asm_noperands (PATTERN (insn))) < 0)
2807 return;
2808 pat = PATTERN (insn);
2809 CLEAR_HARD_REG_SET (clobbered);
2811 if (GET_CODE (pat) == PARALLEL)
2812 for (i = 0; i < XVECLEN (pat, 0); i++)
2814 rtx t = XVECEXP (pat, 0, i);
2815 if (GET_CODE (t) == CLOBBER && REG_P (XEXP (t, 0))
2816 && REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
2817 SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
2820 decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc,
2821 constraints, operand_mode);
2822 in_output = 1;
2823 for (i = 0; i < noperands; i++)
2825 const char *p = constraints[i];
2826 int cls = (int) NO_REGS;
2827 struct df_link *link;
2828 rtx reg;
2829 struct web *web;
2830 int nothing_allowed = 1;
2831 reg = recog_data.operand[i];
2833 /* Check whether the constraints apply to a pseudo reg, and not to
2834 e.g. a mem. */
2835 while (GET_CODE (reg) == SUBREG
2836 || GET_CODE (reg) == ZERO_EXTRACT
2837 || GET_CODE (reg) == SIGN_EXTRACT
2838 || GET_CODE (reg) == STRICT_LOW_PART)
2839 reg = XEXP (reg, 0);
2840 if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2841 continue;
2843 /* Search the web corresponding to this operand. We depend on
2844 decode_asm_operands() placing the output operands
2845 before the input operands. */
2846 while (1)
2848 if (in_output)
2849 link = df->insns[INSN_UID (insn)].defs;
2850 else
2851 link = df->insns[INSN_UID (insn)].uses;
2852 while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg)
2853 link = link->next;
2854 if (!link || !link->ref)
2856 if (in_output)
2857 in_output = 0;
2858 else
2859 abort ();
2861 else
2862 break;
2864 if (in_output)
2865 web = def2web[DF_REF_ID (link->ref)];
2866 else
2867 web = use2web[DF_REF_ID (link->ref)];
2868 reg = DF_REF_REG (link->ref);
2870 /* Find the constraints, noting the allowed hardregs in allowed. */
2871 CLEAR_HARD_REG_SET (allowed);
2872 while (1)
2874 char c = *p;
2876 if (c == '\0' || c == ',' || c == '#')
2878 /* End of one alternative - mark the regs in the current
2879 class, and reset the class. */
2880 p++;
2881 IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
2882 if (cls != NO_REGS)
2883 nothing_allowed = 0;
2884 cls = NO_REGS;
2885 if (c == '#')
2886 do {
2887 c = *p++;
2888 } while (c != '\0' && c != ',');
2889 if (c == '\0')
2890 break;
2891 continue;
2894 switch (c)
2896 case '=': case '+': case '*': case '%': case '?': case '!':
2897 case '0': case '1': case '2': case '3': case '4': case 'm':
2898 case '<': case '>': case 'V': case 'o': case '&': case 'E':
2899 case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
2900 case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
2901 case 'P':
2902 break;
2904 case 'p':
2905 cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
2906 nothing_allowed = 0;
2907 break;
2909 case 'g':
2910 case 'r':
2911 cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
2912 nothing_allowed = 0;
2913 break;
2915 default:
2916 cls =
2917 (int) reg_class_subunion[cls][(int)
2918 REG_CLASS_FROM_CONSTRAINT (c,
2919 p)];
2921 p += CONSTRAINT_LEN (c, p);
2924 /* Now make conflicts between this web and all hardregs which
2925 are not allowed by the constraints. */
2926 if (nothing_allowed)
2928 /* If we had no real constraints nothing was explicitly
2929 allowed, so we allow the whole class (i.e. we make no
2930 additional conflicts). */
2931 CLEAR_HARD_REG_SET (conflict);
2933 else
2935 COPY_HARD_REG_SET (conflict, usable_regs
2936 [reg_preferred_class (web->regno)]);
2937 IOR_HARD_REG_SET (conflict, usable_regs
2938 [reg_alternate_class (web->regno)]);
2939 AND_COMPL_HARD_REG_SET (conflict, allowed);
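/* At this point CONFLICT holds those hardregs from the pseudo's preferred
   and alternate classes that no asm alternative allowed; the disabled loop
   below would turn each of them into a conflict with this web. */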
2940 /* We can't yet establish these conflicts. Reload must go first
2941 (or better said, we must implement some functionality of reload).
2942 E.g. if some operands must match, and they need the same color
2943 we don't yet see that they do not conflict (because they match).
2944 For us it looks like two normal references with different DEFs,
2945 so they conflict, and as they both need the same color, the
2946 graph becomes uncolorable. */
2947 #if 0
2948 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
2949 if (TEST_HARD_REG_BIT (conflict, c))
2950 record_conflict (web, hardreg2web[c]);
2951 #endif
2953 if (dump_file)
2955 int c;
2956 ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
2957 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
2958 if (TEST_HARD_REG_BIT (conflict, c))
2959 ra_debug_msg (DUMP_ASM, " %d", c);
2960 ra_debug_msg (DUMP_ASM, "\n");
2965 /* The real toplevel function in this file.
2966 Builds (or rebuilds) the complete interference graph with webs
2967 and conflicts. */
2969 void
2970 build_i_graph (struct df *df)
2972 rtx insn;
2974 init_web_parts (df);
2976 sbitmap_zero (move_handled);
2977 wl_moves = NULL;
2979 build_web_parts_and_conflicts (df);
2981 /* For read-modify-write instructions we may have created two webs.
2982 Reconnect them here (see above). */
2983 connect_rmw_web_parts (df);
2985 /* The webs are conceptually complete now, but still scattered around as
2986 connected web parts. Collect all information and build the webs
2987 including all conflicts between webs (instead of web parts). */
2988 make_webs (df);
2989 moves_to_webs (df);
2991 /* Look for additional constraints given by asms. */
2992 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2993 handle_asm_insn (df, insn);
2996 /* Allocates or reallocates most memory for the interference graph and
2997 associated structures. If it reallocates memory (meaning this is not
2998 the first pass), this also changes some structures to reflect the
2999 additional entries in various arrays, and the higher number of
3000 defs and uses. */
3002 void
3003 ra_build_realloc (struct df *df)
3005 struct web_part *last_web_parts = web_parts;
3006 struct web **last_def2web = def2web;
3007 struct web **last_use2web = use2web;
3008 sbitmap last_live_over_abnormal = live_over_abnormal;
3009 unsigned int i;
3010 struct dlist *d;
3011 move_handled = sbitmap_alloc (get_max_uid () );
3012 web_parts = xcalloc (df->def_id + df->use_id, sizeof web_parts[0]);
3013 def2web = xcalloc (df->def_id + df->use_id, sizeof def2web[0]);
3014 use2web = &def2web[df->def_id];
3015 live_over_abnormal = sbitmap_alloc (df->use_id);
3016 sbitmap_zero (live_over_abnormal);
3018 /* First go through all old defs and uses. */
3019 for (i = 0; i < last_def_id + last_use_id; i++)
3021 /* And relocate them to the new array. This is made ugly by the
3022 fact that defs and uses are placed consecutively into one array. */
3023 struct web_part *dest = &web_parts[i < last_def_id
3024 ? i : (df->def_id + i - last_def_id)];
3025 struct web_part *up;
3026 *dest = last_web_parts[i];
3027 up = dest->uplink;
3028 dest->uplink = NULL;
3030 /* Also relocate the uplink to point into the new array. */
3031 if (up && up->ref)
3033 unsigned int id = DF_REF_ID (up->ref);
3034 if (up < &last_web_parts[last_def_id])
3036 if (df->defs[id])
3037 dest->uplink = &web_parts[DF_REF_ID (up->ref)];
3039 else if (df->uses[id])
3040 dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)];
3044 /* Also set up the def2web and use2web arrays from the last pass.
3045 Remember also the state of live_over_abnormal. */
3046 for (i = 0; i < last_def_id; i++)
3048 struct web *web = last_def2web[i];
3049 if (web)
3051 web = find_web_for_subweb (web);
3052 if (web->type != FREE && web->type != PRECOLORED)
3053 def2web[i] = last_def2web[i];
3056 for (i = 0; i < last_use_id; i++)
3058 struct web *web = last_use2web[i];
3059 if (web)
3061 web = find_web_for_subweb (web);
3062 if (web->type != FREE && web->type != PRECOLORED)
3063 use2web[i] = last_use2web[i];
3065 if (TEST_BIT (last_live_over_abnormal, i))
3066 SET_BIT (live_over_abnormal, i);
3069 /* We don't have any subwebs for now. At some point we might want to
3070 remember them too, instead of recreating all of them every time.
3071 The problem is that which subwebs we need depends also on what
3072 other webs and subwebs exist, and which conflicts are there.
3073 OTOH it should be no problem if we had some more subwebs than strictly
3074 needed. Later. */
3075 for (d = WEBS(FREE); d; d = d->next)
3077 struct web *web = DLIST_WEB (d);
3078 struct web *wnext;
3079 for (web = web->subreg_next; web; web = wnext)
3081 wnext = web->subreg_next;
3082 free (web);
3084 DLIST_WEB (d)->subreg_next = NULL;
3087 /* The uses we are going to check anyway are not yet live over an abnormal
3088 edge. In fact, they might actually not be anymore, due to added
3089 loads. */
3090 if (last_check_uses)
3091 sbitmap_difference (live_over_abnormal, live_over_abnormal,
3092 last_check_uses);
3094 if (last_def_id || last_use_id)
3096 sbitmap_free (last_live_over_abnormal);
3097 free (last_web_parts);
3098 free (last_def2web);
3100 if (!last_max_uid)
3102 /* Setup copy cache, for copy_insn_p (). */
3103 copy_cache = xcalloc (get_max_uid (), sizeof (copy_cache[0]));
3104 init_bb_info ();
3106 else
3108 copy_cache = xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
3109 memset (&copy_cache[last_max_uid], 0,
3110 (get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
3114 /* Free up/clear some memory, only needed for one pass. */
3116 void
3117 ra_build_free (void)
3119 struct dlist *d;
3120 unsigned int i;
3122 /* Clear the moves associated with a web (we also need to look into
3123 subwebs here). */
3124 for (i = 0; i < num_webs; i++)
3126 struct web *web = ID2WEB (i);
3127 if (!web)
3128 abort ();
3129 if (i >= num_webs - num_subwebs
3130 && (web->conflict_list || web->orig_conflict_list))
3131 abort ();
3132 web->moves = NULL;
3134 /* All webs in the free list have no defs or uses anymore. */
3135 for (d = WEBS(FREE); d; d = d->next)
3137 struct web *web = DLIST_WEB (d);
3138 if (web->defs)
3139 free (web->defs);
3140 web->defs = NULL;
3141 if (web->uses)
3142 free (web->uses);
3143 web->uses = NULL;
3144 /* We can't free the subwebs here, as they are referenced from
3145 def2web[], and possibly needed in the next ra_build_realloc().
3146 We free them there (or in free_all_mem()). */
3149 /* Free all conflict bitmaps from web parts. Note that we clear
3150 _all_ these conflicts, and don't rebuild them next time for uses
3151 which aren't rechecked. This means that those conflict bitmaps
3152 only contain the incremental information. The cumulative one
3153 is still contained in the edges of the I-graph, i.e. in
3154 conflict_list (or orig_conflict_list) of the webs. */
3155 for (i = 0; i < df->def_id + df->use_id; i++)
3157 struct tagged_conflict *cl;
3158 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3160 if (cl->conflicts)
3161 BITMAP_XFREE (cl->conflicts);
3163 web_parts[i].sub_conflicts = NULL;
3166 wl_moves = NULL;
3168 free (id2web);
3169 free (move_handled);
3170 sbitmap_free (sup_igraph);
3171 sbitmap_free (igraph);
3174 /* Free all memory for the interference graph structures. */
3176 void
3177 ra_build_free_all (struct df *df)
3179 unsigned int i;
3181 free_bb_info ();
3182 free (copy_cache);
3183 copy_cache = NULL;
3184 for (i = 0; i < df->def_id + df->use_id; i++)
3186 struct tagged_conflict *cl;
3187 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3189 if (cl->conflicts)
3190 BITMAP_XFREE (cl->conflicts);
3192 web_parts[i].sub_conflicts = NULL;
3194 sbitmap_free (live_over_abnormal);
3195 free (web_parts);
3196 web_parts = NULL;
3197 if (last_check_uses)
3198 sbitmap_free (last_check_uses);
3199 last_check_uses = NULL;
3200 free (def2web);
3201 use2web = NULL;
3202 def2web = NULL;
3205 #include "gt-ra-build.h"
3208 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: