1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "insn-config.h"
28 #include "recog.h"
29 #include "reload.h"
30 #include "function.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "df.h"
35 #include "output.h"
36 #include "ggc.h"
37 #include "ra.h"
39 /* This file is part of the graph coloring register allocator.
40 It deals with building the interference graph. When rebuilding
41 the graph for a function after spilling, we rebuild only those
42 parts needed, i.e. it works incrementally.
44 The first part (the functions called from build_web_parts_and_conflicts()
45 ) constructs a web_part for each pseudo register reference in the insn
46 stream, then goes backward from each use, until it reaches defs for that
47 pseudo. While going back it remembers seen defs for other registers as
48 conflicts. By connecting the uses and defs that reach each other, webs
49 (or live ranges) are conceptually built.
51 The second part (make_webs() and children) deals with converting that
52 structure to the nodes and edges, on which our interference graph is
53 built. For each root web part constructed above, an instance of struct
54 web is created. For all subregs of pseudos, which matter for allocation,
55 a subweb of the corresponding super web is built. Finally all the
56 conflicts noted in the first part (as bitmaps) are transformed into real
57 edges.
59 As part of that process the webs are also classified (their spill cost
60 is calculated, whether they are spillable at all, and if not, for what
61 reason, and whether they are rematerializable), and the move insns
62 which are potentially coalescable are collected.
64 The top-level entry of this file (build_i_graph) puts it all together,
65 and leaves us with a complete interference graph, which just has to
66 be colored. */
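/* Overview sketch (added annotation, not from the original source): the
   entry point described above roughly decomposes as

     build_i_graph ()
       -> build_web_parts_and_conflicts ()   (first part: web parts, conflicts)
       -> make_webs ()                       (second part: webs, subwebs, edges)
       -> moves_to_webs ()                   (attach the collected move insns)

   The exact call sequence inside build_i_graph() is not shown here; this
   only mirrors the two phases the comment above describes.  */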
69 struct curr_use;
71 static unsigned HOST_WIDE_INT rtx_to_undefined PARAMS ((rtx));
72 static bitmap find_sub_conflicts PARAMS ((struct web_part *, unsigned int));
73 static bitmap get_sub_conflicts PARAMS ((struct web_part *, unsigned int));
74 static unsigned int undef_to_size_word PARAMS ((rtx, unsigned HOST_WIDE_INT *));
75 static bitmap undef_to_bitmap PARAMS ((struct web_part *,
76 unsigned HOST_WIDE_INT *));
77 static struct web_part * find_web_part_1 PARAMS ((struct web_part *));
78 static struct web_part * union_web_part_roots
79 PARAMS ((struct web_part *, struct web_part *));
80 static int defuse_overlap_p_1 PARAMS ((rtx, struct curr_use *));
81 static int live_out_1 PARAMS ((struct df *, struct curr_use *, rtx));
82 static int live_out PARAMS ((struct df *, struct curr_use *, rtx));
83 static rtx live_in_edge PARAMS (( struct df *, struct curr_use *, edge));
84 static void live_in PARAMS ((struct df *, struct curr_use *, rtx));
85 static int copy_insn_p PARAMS ((rtx, rtx *, rtx *));
86 static void remember_move PARAMS ((rtx));
87 static void handle_asm_insn PARAMS ((struct df *, rtx));
88 static void prune_hardregs_for_mode PARAMS ((HARD_REG_SET *,
89 enum machine_mode));
90 static void init_one_web_common PARAMS ((struct web *, rtx));
91 static void init_one_web PARAMS ((struct web *, rtx));
92 static void reinit_one_web PARAMS ((struct web *, rtx));
93 static struct web * add_subweb PARAMS ((struct web *, rtx));
94 static struct web * add_subweb_2 PARAMS ((struct web *, unsigned int));
95 static void init_web_parts PARAMS ((struct df *));
96 static void copy_conflict_list PARAMS ((struct web *));
97 static void add_conflict_edge PARAMS ((struct web *, struct web *));
98 static void build_inverse_webs PARAMS ((struct web *));
99 static void copy_web PARAMS ((struct web *, struct web_link **));
100 static void compare_and_free_webs PARAMS ((struct web_link **));
101 static void init_webs_defs_uses PARAMS ((void));
102 static unsigned int parts_to_webs_1 PARAMS ((struct df *, struct web_link **,
103 struct df_link *));
104 static void parts_to_webs PARAMS ((struct df *));
105 static void reset_conflicts PARAMS ((void));
106 #if 0
107 static void check_conflict_numbers PARAMS ((void));
108 #endif
109 static void conflicts_between_webs PARAMS ((struct df *));
110 static void remember_web_was_spilled PARAMS ((struct web *));
111 static void detect_spill_temps PARAMS ((void));
112 static int contains_pseudo PARAMS ((rtx));
113 static int want_to_remat PARAMS ((rtx x));
114 static void detect_remat_webs PARAMS ((void));
115 static void determine_web_costs PARAMS ((void));
116 static void detect_webs_set_in_cond_jump PARAMS ((void));
117 static void make_webs PARAMS ((struct df *));
118 static void moves_to_webs PARAMS ((struct df *));
119 static void connect_rmw_web_parts PARAMS ((struct df *));
120 static void update_regnos_mentioned PARAMS ((void));
121 static void livethrough_conflicts_bb PARAMS ((basic_block));
122 static void init_bb_info PARAMS ((void));
123 static void free_bb_info PARAMS ((void));
124 static void build_web_parts_and_conflicts PARAMS ((struct df *));
127 /* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
128 edge. */
129 static sbitmap live_over_abnormal;
131 /* To cache if we already saw a certain edge while analyzing one
132 use, we use a tick count incremented per use. */
133 static unsigned int visited_pass;
135 /* A sbitmap of UIDs of move insns, which we already analyzed. */
136 static sbitmap move_handled;
138 /* One such structure is allocated per insn; for the currently
139 analyzed use it records which web part that use belongs to, and how many
140 bytes of it were still undefined when that insn was reached.  */
141 struct visit_trace
143 struct web_part *wp;
144 unsigned HOST_WIDE_INT undefined;
146 /* Indexed by UID. */
147 static struct visit_trace *visit_trace;
149 /* Per basic block we have one such structure, used to speed up
150 the backtracing of uses. */
151 struct ra_bb_info
153 /* The value of visited_pass when the first insn of this BB was reached
154 the last time. If this equals the current visited_pass, then
155 undefined is valid; otherwise it is not.  */
156 unsigned int pass;
157 /* The still undefined bytes at that time. The use to which this is
158 relative is the current use.  */
159 unsigned HOST_WIDE_INT undefined;
160 /* Bit regno is set if that regno is mentioned in this BB as a def, or
161 as the source of a copy insn. In these cases we cannot skip the whole
162 block if we reach it from the end.  */
163 bitmap regnos_mentioned;
164 /* If a use reaches the end of a BB, and that BB doesn't mention its
165 regno, we skip the block, and remember the ID of that use
166 as living throughout the whole block.  */
167 bitmap live_throughout;
168 /* The content of the aux field before placing a pointer to this
169 structure there.  */
170 void *old_aux;
173 /* We need a fast way to describe a certain part of a register.
174 Therefore we put together the size and offset (in bytes) in one
175 integer. */
176 #define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF))
177 #define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF)
178 #define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF)
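/* Illustrative note (added): BL_TO_WORD packs the byte offset into the
   upper 16 bits and the byte length into the lower 16 bits.  E.g.
   BL_TO_WORD (2, 4) == 0x00020004, from which BYTE_BEGIN() recovers 2
   and BYTE_LENGTH() recovers 4.  */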
180 /* For a REG or SUBREG expression X return the size/offset pair
181 as an integer.  */
183 unsigned int
184 rtx_to_bits (x)
185 rtx x;
187 unsigned int len, beg;
188 len = GET_MODE_SIZE (GET_MODE (x));
189 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
190 return BL_TO_WORD (beg, len);
193 /* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */
195 static unsigned HOST_WIDE_INT
196 rtx_to_undefined (x)
197 rtx x;
199 unsigned int len, beg;
200 unsigned HOST_WIDE_INT ret;
201 len = GET_MODE_SIZE (GET_MODE (x));
202 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
203 ret = ~ ((unsigned HOST_WIDE_INT) 0);
204 ret = (~(ret << len)) << beg;
205 return ret;
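/* Worked example (added for illustration): for (subreg:HI (reg:SI 65) 2),
   assuming 2-byte HImode, len is 2 and beg is 2, so rtx_to_undefined
   returns (~(~0 << 2)) << 2 == 0xc, i.e. bytes 2 and 3 of the pseudo are
   marked as undefined.  The regno 65 is made up for the example.  */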
208 /* We remember if we've analyzed an insn for being a move insn, and if yes
209 between which operands. */
210 struct copy_p_cache
212 int seen;
213 rtx source;
214 rtx target;
217 /* On demand cache, for if insns are copy insns, and if yes, what
218 source/target they have. */
219 static struct copy_p_cache * copy_cache;
221 int *number_seen;
223 /* For INSN, return nonzero if it's a move insn we consider for coalescing
224 later, and place the operands in *SOURCE and *TARGET, if those are
225 not NULL. */
227 static int
228 copy_insn_p (insn, source, target)
229 rtx insn;
230 rtx *source;
231 rtx *target;
233 rtx d, s;
234 unsigned int d_regno, s_regno;
235 int uid = INSN_UID (insn);
237 if (!INSN_P (insn))
238 abort ();
240 /* First check whether we already saw this insn. */
241 if (copy_cache[uid].seen)
243 /* And if we saw it, whether it's actually a copy insn. */
244 if (copy_cache[uid].seen == 1)
246 if (source)
247 *source = copy_cache[uid].source;
248 if (target)
249 *target = copy_cache[uid].target;
250 return 1;
252 return 0;
255 /* Mark it as seen, but not being a copy insn. */
256 copy_cache[uid].seen = 2;
257 insn = single_set (insn);
258 if (!insn)
259 return 0;
260 d = SET_DEST (insn);
261 s = SET_SRC (insn);
263 /* We recognize moves between subregs as copy insns. This is used to avoid
264 conflicts between those subwebs. But they are currently _not_ used for
265 coalescing (the check for this is in remember_move() below). */
266 while (GET_CODE (d) == STRICT_LOW_PART)
267 d = XEXP (d, 0);
268 if (GET_CODE (d) != REG
269 && (GET_CODE (d) != SUBREG || GET_CODE (SUBREG_REG (d)) != REG))
270 return 0;
271 while (GET_CODE (s) == STRICT_LOW_PART)
272 s = XEXP (s, 0);
273 if (GET_CODE (s) != REG
274 && (GET_CODE (s) != SUBREG || GET_CODE (SUBREG_REG (s)) != REG))
275 return 0;
277 s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
278 d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d);
280 /* Copies between hardregs are useless for us, as they are not coalescable anyway. */
281 if ((s_regno < FIRST_PSEUDO_REGISTER
282 && d_regno < FIRST_PSEUDO_REGISTER)
283 || s_regno >= max_normal_pseudo
284 || d_regno >= max_normal_pseudo)
285 return 0;
287 if (source)
288 *source = s;
289 if (target)
290 *target = d;
292 /* Still mark it as seen, but as a copy insn this time. */
293 copy_cache[uid].seen = 1;
294 copy_cache[uid].source = s;
295 copy_cache[uid].target = d;
296 return 1;
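/* Illustrative examples (added): a simple (set (reg:SI 70) (reg:SI 71)),
   with both operands normal pseudos, is recognized as a copy insn, with
   *TARGET = (reg 70) and *SOURCE = (reg 71), and is cached with seen == 1.
   An insn like (set (reg:SI 70) (plus:SI (reg:SI 71) (const_int 1))) fails
   the REG/SUBREG tests above and is cached with seen == 2 (not a copy).
   The regnos are made up for the example.  */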
299 /* We build webs as we process the conflicts. For each use we go upward
300 in the insn stream, noting any defs as potentially conflicting with the
301 current use. We stop at defs of the current regno. The conflicts are only
302 potential, because we may never reach a def, in which case this is an
303 undefined use, which conflicts with nothing.  */
306 /* Given a web part WP, and the location of a reg part SIZE_WORD
307 return the conflict bitmap for that reg part, or NULL if it doesn't
308 exist yet in WP. */
310 static bitmap
311 find_sub_conflicts (wp, size_word)
312 struct web_part *wp;
313 unsigned int size_word;
315 struct tagged_conflict *cl;
316 cl = wp->sub_conflicts;
317 for (; cl; cl = cl->next)
318 if (cl->size_word == size_word)
319 return cl->conflicts;
320 return NULL;
323 /* Similar to find_sub_conflicts(), but creates that bitmap, if it
324 doesn't exist. I.e. this never returns NULL. */
326 static bitmap
327 get_sub_conflicts (wp, size_word)
328 struct web_part *wp;
329 unsigned int size_word;
331 bitmap b = find_sub_conflicts (wp, size_word);
332 if (!b)
334 struct tagged_conflict *cl =
335 (struct tagged_conflict *) ra_alloc (sizeof *cl);
336 cl->conflicts = BITMAP_XMALLOC ();
337 cl->size_word = size_word;
338 cl->next = wp->sub_conflicts;
339 wp->sub_conflicts = cl;
340 b = cl->conflicts;
342 return b;
345 /* Helper table for undef_to_size_word() below for small values
346 of UNDEFINED. Offsets and lengths are byte based. */
347 static struct undef_table_s {
348 unsigned int new_undef;
349 /* size | (byte << 16) */
350 unsigned int size_word;
351 } const undef_table [] = {
352 { 0, BL_TO_WORD (0, 0)}, /* 0 */
353 { 0, BL_TO_WORD (0, 1)},
354 { 0, BL_TO_WORD (1, 1)},
355 { 0, BL_TO_WORD (0, 2)},
356 { 0, BL_TO_WORD (2, 1)}, /* 4 */
357 { 1, BL_TO_WORD (2, 1)},
358 { 2, BL_TO_WORD (2, 1)},
359 { 3, BL_TO_WORD (2, 1)},
360 { 0, BL_TO_WORD (3, 1)}, /* 8 */
361 { 1, BL_TO_WORD (3, 1)},
362 { 2, BL_TO_WORD (3, 1)},
363 { 3, BL_TO_WORD (3, 1)},
364 { 0, BL_TO_WORD (2, 2)}, /* 12 */
365 { 1, BL_TO_WORD (2, 2)},
366 { 2, BL_TO_WORD (2, 2)},
367 { 0, BL_TO_WORD (0, 4)}};
369 /* Interpret *UNDEFINED as a bitmask where each bit corresponds to a byte.
370 A set bit means an undefined byte. Group all undefined bytes, and
371 return a size/ofs pair of consecutive undefined bytes, aligned
372 to certain borders. Clear out those bits corresponding
373 to bytes overlaid by that size/ofs pair. REG is only used for
374 the mode, to detect whether it's a floating mode or not.
376 For example: *UNDEFINED size+ofs new *UNDEFINED
377 1111 4+0 0
378 1100 2+2 0
379 1101 2+2 1
380 0001 1+0 0
381 10101 1+4 101  */
385 static unsigned int
386 undef_to_size_word (reg, undefined)
387 rtx reg;
388 unsigned HOST_WIDE_INT *undefined;
390 /* When only the lower four bits are possibly set, we use
391 a fast lookup table. */
392 if (*undefined <= 15)
394 struct undef_table_s u;
395 u = undef_table[*undefined];
396 *undefined = u.new_undef;
397 return u.size_word;
400 /* Otherwise we handle certain cases directly. */
401 switch (*undefined)
403 case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4);
404 case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8);
405 case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4);
406 case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4);
407 case 0x0fff :
408 if (INTEGRAL_MODE_P (GET_MODE (reg)))
409 { *undefined = 0xff; return BL_TO_WORD (8, 4); }
410 else
411 { *undefined = 0; return BL_TO_WORD (0, 12); /* XFmode */ }
412 case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4);
413 case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8);
414 case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8);
415 case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16);
417 /* And if nothing matched fall back to the general solution.
418 For now unknown undefined bytes are converted to sequences
419 of maximal length 4 bytes. We could make this larger if
420 necessary. */
421 default :
423 unsigned HOST_WIDE_INT u = *undefined;
424 int word;
425 struct undef_table_s tab;
426 for (word = 0; (u & 15) == 0; word += 4)
427 u >>= 4;
428 u = u & 15;
429 tab = undef_table[u];
430 u = tab.new_undef;
431 u = (*undefined & ~((unsigned HOST_WIDE_INT)15 << word))
432 | (u << word);
433 *undefined = u;
434 /* Size remains the same, only the begin is moved up by WORD bytes. */
435 return tab.size_word + BL_TO_WORD (word, 0);
437 break;
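/* Worked example for the general case above (added): *UNDEFINED == 0x30
   (bytes 4 and 5 undefined).  The loop shifts until a nonzero nibble is
   found, giving word == 4 and u == 3; undef_table[3] is {0, BL_TO_WORD (0, 2)},
   so *UNDEFINED becomes 0 and the function returns BL_TO_WORD (4, 2),
   i.e. a 2-byte part starting at byte 4.  */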
441 /* Put the above three functions together. For a set of undefined bytes
442 as bitmap *UNDEFINED, look for (create if necessary) and return the
443 corresponding conflict bitmap. Change *UNDEFINED to remove the bytes
444 covered by the part for that bitmap. */
446 static bitmap
447 undef_to_bitmap (wp, undefined)
448 struct web_part *wp;
449 unsigned HOST_WIDE_INT *undefined;
451 unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
452 undefined);
453 return get_sub_conflicts (wp, size_word);
456 /* Returns the root of the web part P is a member of. Additionally
457 it compresses the path. P may not be NULL. */
459 static struct web_part *
460 find_web_part_1 (p)
461 struct web_part *p;
463 struct web_part *r = p;
464 struct web_part *p_next;
465 while (r->uplink)
466 r = r->uplink;
467 for (; p != r; p = p_next)
469 p_next = p->uplink;
470 p->uplink = r;
472 return r;
475 /* Fast macro for the common case (WP either being the root itself, or
476 the end of an already compressed path). */
478 #define find_web_part(wp) ((! (wp)->uplink) ? (wp) \
479 : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp))
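/* Illustrative note (added): web parts form a union-find structure.  With
   an uplink chain wp_a -> wp_b -> wp_c and wp_c the root, calling
   find_web_part_1 (wp_a) returns wp_c and rewrites both wp_a->uplink and
   wp_b->uplink to point directly at wp_c (path compression), so later
   lookups take the fast path of the macro above.  The names wp_a, wp_b,
   wp_c are made up for the example.  */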
481 /* Unions together the web parts that R1 and R2 are roots of.
482 All dynamic information associated with the parts (number of spanned insns
483 and so on) is also merged.
484 The root of the resulting (possibly larger) web part is returned. */
486 static struct web_part *
487 union_web_part_roots (r1, r2)
488 struct web_part *r1, *r2;
490 if (r1 != r2)
492 /* The new root is the smaller (pointerwise) of both. This is crucial
493 to make the construction of webs from web parts work (so, when
494 scanning all parts, we see the roots before all their children).
495 Additionally this ensures that if the web has a def at all, then
496 the root is a def (because all def parts are before use parts in the
497 web_parts[] array), or put another way, as soon as the root of a
498 web part is not a def, this is an uninitialized web part. The
499 way we construct the I-graph ensures, that if a web is initialized,
500 then the first part we find (besides trivial 1 item parts) has a
501 def. */
502 if (r1 > r2)
504 struct web_part *h = r1;
505 r1 = r2;
506 r2 = h;
508 r2->uplink = r1;
509 num_webs--;
511 /* Now we merge the dynamic information of R1 and R2. */
512 r1->spanned_deaths += r2->spanned_deaths;
514 if (!r1->sub_conflicts)
515 r1->sub_conflicts = r2->sub_conflicts;
516 else if (r2->sub_conflicts)
517 /* We need to merge the conflict bitmaps from R2 into R1. */
519 struct tagged_conflict *cl1, *cl2;
520 /* First those from R2, which are also contained in R1.
521 We union the bitmaps, and free those from R2, resetting them
522 to 0. */
523 for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next)
524 for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
525 if (cl1->size_word == cl2->size_word)
527 bitmap_operation (cl1->conflicts, cl1->conflicts,
528 cl2->conflicts, BITMAP_IOR);
529 BITMAP_XFREE (cl2->conflicts);
530 cl2->conflicts = NULL;
532 /* Now the conflict lists from R2 which weren't in R1.
533 We simply copy the entries from R2 into R1's list.  */
534 for (cl2 = r2->sub_conflicts; cl2;)
536 struct tagged_conflict *cl_next = cl2->next;
537 if (cl2->conflicts)
539 cl2->next = r1->sub_conflicts;
540 r1->sub_conflicts = cl2;
542 cl2 = cl_next;
545 r2->sub_conflicts = NULL;
546 r1->crosses_call |= r2->crosses_call;
548 return r1;
551 /* Convenience macro, that is capable of unioning also non-roots. */
552 #define union_web_parts(p1, p2) \
553 ((p1 == p2) ? find_web_part (p1) \
554 : union_web_part_roots (find_web_part (p1), find_web_part (p2)))
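/* Note (added, deduced from the code in this file): defs occupy
   web_parts[0 .. def_id-1] and uses occupy web_parts[def_id ..], so
   choosing the pointerwise smaller part as the new root in
   union_web_part_roots() makes the root a def whenever the web contains
   one, which is exactly what the comment there relies on.  */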
556 /* Remember that we've handled a given move, so we don't reprocess it. */
558 static void
559 remember_move (insn)
560 rtx insn;
562 if (!TEST_BIT (move_handled, INSN_UID (insn)))
564 rtx s, d;
565 SET_BIT (move_handled, INSN_UID (insn));
566 if (copy_insn_p (insn, &s, &d))
568 /* Some sanity test for the copy insn. */
569 struct df_link *slink = DF_INSN_USES (df, insn);
570 struct df_link *link = DF_INSN_DEFS (df, insn);
571 if (!link || !link->ref || !slink || !slink->ref)
572 abort ();
573 /* The following (link->next != 0) happens when a hardreg
574 is used in wider mode (REG:DI %eax). Then df.* creates
575 a def/use for each hardreg contained therein. We only
576 allow hardregs here. */
577 if (link->next
578 && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER)
579 abort ();
581 else
582 abort ();
583 /* XXX for now we don't remember move insns involving any subregs.
584 Those would be difficult to coalesce (we would need to implement
585 handling of all the subwebs in the allocator, including that such
586 subwebs could be source and target of coalescing). */
587 if (GET_CODE (s) == REG && GET_CODE (d) == REG)
589 struct move *m = (struct move *) ra_calloc (sizeof (struct move));
590 struct move_list *ml;
591 m->insn = insn;
592 ml = (struct move_list *) ra_alloc (sizeof (struct move_list));
593 ml->move = m;
594 ml->next = wl_moves;
595 wl_moves = ml;
600 /* This describes the USE currently looked at in the main-loop in
601 build_web_parts_and_conflicts(). */
602 struct curr_use {
603 struct web_part *wp;
604 /* This has a 1-bit for each byte in the USE, which is still undefined. */
605 unsigned HOST_WIDE_INT undefined;
606 /* For easy access. */
607 unsigned int regno;
608 rtx x;
609 /* If some bits of this USE are live over an abnormal edge. */
610 unsigned int live_over_abnormal;
613 /* Returns nonzero iff rtx DEF and USE have bits in common (but see below).
614 It is only called with DEF and USE being (reg:M a) or (subreg:M1 (reg:M2 a)
615 x) rtx's. Furthermore, if it's a subreg rtx, M1 is at least one word wide,
616 and a is a multi-word pseudo. If DEF or USE are hardregs, they are in
617 word_mode, so we don't need to check for further hardregs which would result
618 from wider references. We are never called with paradoxical subregs.
620 This returns:
621 0 for no common bits,
622 1 if DEF and USE exactly cover the same bytes,
623 2 if the DEF only covers a part of the bits of USE
624 3 if the DEF covers more than the bits of the USE, and
625 4 if both are SUBREG's of different size, but have bytes in common.
626 -1 is a special case, for when DEF and USE refer to the same regno, but
627 for other reasons have no bits in common (this can only happen with
628 subregs referring to different words, or to words which already were
629 defined for this USE).
630 Furthermore it modifies use->undefined to clear the bits which get defined
631 by DEF (only for cases with partial overlap).
632 I.e. if bit 1 is set for the result != -1, the USE was completely covered,
633 otherwise a test is needed to track the already defined bytes. */
635 static int
636 defuse_overlap_p_1 (def, use)
637 rtx def;
638 struct curr_use *use;
640 int mode = 0;
641 if (def == use->x)
642 return 1;
643 if (!def)
644 return 0;
645 if (GET_CODE (def) == SUBREG)
647 if (REGNO (SUBREG_REG (def)) != use->regno)
648 return 0;
649 mode |= 1;
651 else if (REGNO (def) != use->regno)
652 return 0;
653 if (GET_CODE (use->x) == SUBREG)
654 mode |= 2;
655 switch (mode)
657 case 0: /* REG, REG */
658 return 1;
659 case 1: /* SUBREG, REG */
661 unsigned HOST_WIDE_INT old_u = use->undefined;
662 use->undefined &= ~ rtx_to_undefined (def);
663 return (old_u != use->undefined) ? 2 : -1;
665 case 2: /* REG, SUBREG */
666 return 3;
667 case 3: /* SUBREG, SUBREG */
668 if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x)))
669 /* If the size of both things is the same, the subregs overlap
670 if they refer to the same word. */
671 if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
672 return 1;
673 /* Now the more difficult part: the same regno is referred to, but the
674 sizes of the references or the words differ. E.g.
675 (subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
676 overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).  */
679 unsigned HOST_WIDE_INT old_u;
680 int b1, e1, b2, e2;
681 unsigned int bl1, bl2;
682 bl1 = rtx_to_bits (def);
683 bl2 = rtx_to_bits (use->x);
684 b1 = BYTE_BEGIN (bl1);
685 b2 = BYTE_BEGIN (bl2);
686 e1 = b1 + BYTE_LENGTH (bl1) - 1;
687 e2 = b2 + BYTE_LENGTH (bl2) - 1;
688 if (b1 > e2 || b2 > e1)
689 return -1;
690 old_u = use->undefined;
691 use->undefined &= ~ rtx_to_undefined (def);
692 return (old_u != use->undefined) ? 4 : -1;
694 default:
695 abort ();
699 /* Macro for the common case of def and use either having the same rtx,
700 or being based on different regnos. */
701 #define defuse_overlap_p(def, use) \
702 ((def) == (use)->x ? 1 : \
703 (REGNO (GET_CODE (def) == SUBREG \
704 ? SUBREG_REG (def) : def) != use->regno \
705 ? 0 : defuse_overlap_p_1 (def, use)))
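/* Worked example (added): with a use of (reg:DI 60) (use->undefined ==
   0xff, assuming 8-byte DImode) and a def of (subreg:SI (reg:DI 60) 0)
   (assuming 4-byte SImode), defuse_overlap_p_1 takes case 1 (SUBREG, REG):
   it clears the low four bytes, leaving use->undefined == 0xf0, and
   returns 2, i.e. the def covers only part of the use.  The regno 60 is
   made up for the example.  */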
708 /* The use USE flows into INSN (backwards). Determine INSN's effect on it,
709 and return nonzero if (parts of) that USE are also live before it.
710 This also notes conflicts between the USE and all DEFS in that insn,
711 and modifies the undefined bits of USE in case parts of it were set in
712 this insn. */
714 static int
715 live_out_1 (df, use, insn)
716 struct df *df ATTRIBUTE_UNUSED;
717 struct curr_use *use;
718 rtx insn;
720 int defined = 0;
721 int uid = INSN_UID (insn);
722 struct web_part *wp = use->wp;
724 /* Mark that this insn needs this webpart live. */
725 visit_trace[uid].wp = wp;
726 visit_trace[uid].undefined = use->undefined;
728 if (INSN_P (insn))
730 unsigned int source_regno = ~0;
731 unsigned int regno = use->regno;
732 unsigned HOST_WIDE_INT orig_undef = use->undefined;
733 unsigned HOST_WIDE_INT final_undef = use->undefined;
734 rtx s = NULL;
735 unsigned int n, num_defs = insn_df[uid].num_defs;
736 struct ref **defs = insn_df[uid].defs;
738 /* We want to access the root webpart. */
739 wp = find_web_part (wp);
740 if (GET_CODE (insn) == CALL_INSN)
741 wp->crosses_call = 1;
742 else if (copy_insn_p (insn, &s, NULL))
743 source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
745 /* Look at all DEFS in this insn. */
746 for (n = 0; n < num_defs; n++)
748 struct ref *ref = defs[n];
749 int lap;
751 /* Reset the undefined bits for each iteration, in case this
752 insn has more than one set, and one of them sets this regno.
753 But still the original undefined part conflicts with the other
754 sets. */
755 use->undefined = orig_undef;
756 if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0)
758 if (lap == -1)
759 /* Same regnos but non-overlapping or already defined bits,
760 so ignore this DEF, or better said make the yet undefined
761 part and this DEF conflicting. */
763 unsigned HOST_WIDE_INT undef;
764 undef = use->undefined;
765 while (undef)
766 bitmap_set_bit (undef_to_bitmap (wp, &undef),
767 DF_REF_ID (ref));
768 continue;
770 if ((lap & 1) != 0)
771 /* The current DEF completely covers the USE, so we can
772 stop traversing the code looking for further DEFs. */
773 defined = 1;
774 else
775 /* We have a partial overlap. */
777 final_undef &= use->undefined;
778 if (final_undef == 0)
779 /* Now the USE is completely defined, which means, that
780 we can stop looking for former DEFs. */
781 defined = 1;
782 /* If this is a partial overlap, which left some bits
783 in USE undefined, we normally would need to create
784 conflicts between that undefined part and the part of
785 this DEF which overlapped with some of the formerly
786 undefined bits. We don't need to do this, because both
787 parts of this DEF (that which overlaps, and that which
788 doesn't) are written together in this one DEF, and can
789 not be colored in a way which would conflict with
790 the USE. This is only true for partial overlap,
791 because only then the DEF and USE have bits in common,
792 which makes the DEF move, if the USE moves, making them
793 aligned.
794 If they have no bits in common (lap == -1), they are
795 really independent. Therefore we made a
796 conflict above in that case. */
798 /* This is at least a partial overlap, so we need to union
799 the web parts. */
800 wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
802 else
804 /* The DEF and the USE don't overlap at all, different
805 regnos. I.e. make conflicts between the undefined bits,
806 and that DEF. */
807 unsigned HOST_WIDE_INT undef = use->undefined;
809 if (regno == source_regno)
810 /* This triggers only, when this was a copy insn and the
811 source is at least a part of the USE currently looked at.
812 In this case only the bits of the USE conflict with the
813 DEF, which are not covered by the source of this copy
814 insn, and which are still undefined. I.e. in the best
815 case (the whole reg being the source), _no_ conflicts
816 between that USE and this DEF (the target of the move)
817 are created by this insn (though they might be by
818 others). This is a super case of the normal copy insn
819 only between full regs. */
821 undef &= ~ rtx_to_undefined (s);
823 if (undef)
825 /*struct web_part *cwp;
826 cwp = find_web_part (&web_parts[DF_REF_ID
827 (ref)]);*/
829 /* TODO: somehow instead of noting the ID of the LINK
830 use an ID nearer to the root webpart of that LINK.
831 We can't use the root itself, because we later use the
832 ID to look at the form (reg or subreg, and if yes,
833 which subreg) of this conflict. This means, that we
834 need to remember in the root an ID for each form, and
835 maintaining this, when merging web parts. This makes
836 the bitmaps smaller. */
838 bitmap_set_bit (undef_to_bitmap (wp, &undef),
839 DF_REF_ID (ref));
840 while (undef);
844 if (defined)
845 use->undefined = 0;
846 else
848 /* If this insn doesn't completely define the USE, also increment
849 its spanned deaths count (if this insn contains a death). */
850 if (uid >= death_insns_max_uid)
851 abort ();
852 if (TEST_BIT (insns_with_deaths, uid))
853 wp->spanned_deaths++;
854 use->undefined = final_undef;
858 return !defined;
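/* Note (added): the return value is nonzero when parts of the USE are
   still undefined before this insn, i.e. live_in() has to keep walking
   backwards.  Also note how use->undefined is reset to orig_undef for
   every def of a multi-set insn, so each set is compared against the
   bytes that were undefined on entry, while final_undef accumulates what
   remains undefined over all sets together.  */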
861 /* Same as live_out_1() (actually calls it), but caches some information.
862 E.g. if we already reached this INSN with the current regno, and the
863 current undefined bits are a subset of those from back then, we
864 simply connect the web parts of the USE and the one cached for this
865 INSN, and additionally return zero, indicating we don't need to traverse
866 this path any longer (all effects were already seen when we first reached
867 this insn). */
869 static inline int
870 live_out (df, use, insn)
871 struct df *df;
872 struct curr_use *use;
873 rtx insn;
875 unsigned int uid = INSN_UID (insn);
876 if (visit_trace[uid].wp
877 && DF_REF_REGNO (visit_trace[uid].wp->ref) == use->regno
878 && (use->undefined & ~visit_trace[uid].undefined) == 0)
880 union_web_parts (visit_trace[uid].wp, use->wp);
881 /* Don't search any further, as we already were here with this regno. */
882 return 0;
884 else
885 return live_out_1 (df, use, insn);
888 /* The current USE reached a basic block head. The edge E is one
889 of the predecessors edges. This evaluates the effect of the predecessor
890 block onto the USE, and returns the next insn, which should be looked at.
891 This is either the last insn of that pred. block, or the first one.
892 The latter happens when the pred. block has no possible effect on the
893 USE, except for conflicts. In that case we remember that the USE
894 is live over that whole block, and skip it. Otherwise we simply
895 continue with the last insn of the block.
897 This also determines the effects of abnormal edges, and remembers
898 which uses are live at the end of that basic block. */
900 static rtx
901 live_in_edge (df, use, e)
902 struct df *df;
903 struct curr_use *use;
904 edge e;
906 struct ra_bb_info *info_pred;
907 rtx next_insn;
908 /* Call used hard regs die over an exception edge, ergo
909 they don't reach the predecessor block, so ignore such
910 uses. And also don't set the live_over_abnormal flag
911 for them. */
912 if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER
913 && call_used_regs[use->regno])
914 return NULL_RTX;
915 if (e->flags & EDGE_ABNORMAL)
916 use->live_over_abnormal = 1;
917 bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
918 info_pred = (struct ra_bb_info *) e->src->aux;
919 next_insn = e->src->end;
921 /* If the last insn of the pred. block doesn't completely define the
922 current use, we need to check the block. */
923 if (live_out (df, use, next_insn))
925 /* If the current regno isn't mentioned anywhere in the whole block,
926 and the complete use is still undefined... */
927 if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno)
928 && (rtx_to_undefined (use->x) & ~use->undefined) == 0)
930 /* ...we can hop over the whole block and defer conflict
931 creation to later. */
932 bitmap_set_bit (info_pred->live_throughout,
933 DF_REF_ID (use->wp->ref));
934 next_insn = e->src->head;
936 return next_insn;
938 else
939 return NULL_RTX;
942 /* USE flows into the end of the insns preceding INSN. Determine
943 their effects (in live_out()) and possibly loop over the preceding INSN,
944 or call itself recursively on a basic block border. When a toplevel
945 call of this function returns, the USE is completely analyzed. I.e.
946 its def-use chain (at least) is built, possibly connected with other
947 def-use chains, and all defs during that chain are noted. */
949 static void
950 live_in (df, use, insn)
951 struct df *df;
952 struct curr_use *use;
953 rtx insn;
955 unsigned int loc_vpass = visited_pass;
957 /* Note that, even _if_ we are called with use->wp being a root-part, it might
958 become non-root in the loop below (due to live_out() unioning
959 it). So beware not to change use->wp in a way which is only valid
960 for root webs. */
961 while (1)
963 int uid = INSN_UID (insn);
964 basic_block bb = BLOCK_FOR_INSN (insn);
965 number_seen[uid]++;
967 /* We want to be as fast as possible, so explicitly write
968 this loop. */
969 for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
970 insn = PREV_INSN (insn))
972 if (!insn)
973 return;
974 if (bb != BLOCK_FOR_INSN (insn))
976 edge e;
977 unsigned HOST_WIDE_INT undef = use->undefined;
978 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
979 if ((e = bb->pred) == NULL)
980 return;
981 /* We now check, if we already traversed the predecessors of this
982 block for the current pass and the current set of undefined
983 bits. If yes, we don't need to check the predecessors again.
984 So, conceptually this information is tagged to the first
985 insn of a basic block. */
986 if (info->pass == loc_vpass && (undef & ~info->undefined) == 0)
987 return;
988 info->pass = loc_vpass;
989 info->undefined = undef;
990 /* All but the last predecessor are handled recursively. */
991 for (; e->pred_next; e = e->pred_next)
993 insn = live_in_edge (df, use, e);
994 if (insn)
995 live_in (df, use, insn);
996 use->undefined = undef;
998 insn = live_in_edge (df, use, e);
999 if (!insn)
1000 return;
1002 else if (!live_out (df, use, insn))
1003 return;
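/* Note (added): only the first N-1 predecessor edges are handled by the
   recursive calls above; the last edge falls through into the enclosing
   while (1) loop, i.e. the recursion on the final predecessor is turned
   into iteration by hand, which keeps the recursion depth tied to the
   branching of the CFG rather than to the length of the traversed
   insn chains.  */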
1007 /* Determine all regnos which are mentioned in a basic block, in an
1008 interesting way. Interesting here means either in a def, or as the
1009 source of a move insn. We only look at insns added since the last
1010 pass. */
1012 static void
1013 update_regnos_mentioned ()
1015 int last_uid = last_max_uid;
1016 rtx insn;
1017 basic_block bb;
1018 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1019 if (INSN_P (insn))
1021 /* Don't look at old insns. */
1022 if (INSN_UID (insn) < last_uid)
1024 /* XXX We should also remember moves over iterations (we already
1025 save the cache, but not the movelist). */
1026 if (copy_insn_p (insn, NULL, NULL))
1027 remember_move (insn);
1029 else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
1031 rtx source;
1032 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1033 bitmap mentioned = info->regnos_mentioned;
1034 struct df_link *link;
1035 if (copy_insn_p (insn, &source, NULL))
1037 remember_move (insn);
1038 bitmap_set_bit (mentioned,
1039 REGNO (GET_CODE (source) == SUBREG
1040 ? SUBREG_REG (source) : source));
1042 for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
1043 if (link->ref)
1044 bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref));
1049 /* Handle the uses which reach a block end, but were deferred due
1050 to their regno not being mentioned in that block. This adds the
1051 remaining conflicts and also updates the crosses_call and
1052 spanned_deaths members. */
1054 static void
1055 livethrough_conflicts_bb (bb)
1056 basic_block bb;
1058 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1059 rtx insn;
1060 bitmap all_defs;
1061 int first, use_id;
1062 unsigned int deaths = 0;
1063 unsigned int contains_call = 0;
1065 /* If there are no deferred uses, just return. */
1066 if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
1067 return;
1069 /* First collect the IDs of all defs, count the number of death
1070 containing insns, and note whether there's some call_insn here. */
1071 all_defs = BITMAP_XMALLOC ();
1072 for (insn = bb->head; insn; insn = NEXT_INSN (insn))
1074 if (INSN_P (insn))
1076 unsigned int n;
1077 struct ra_insn_info info;
1079 info = insn_df[INSN_UID (insn)];
1080 for (n = 0; n < info.num_defs; n++)
1081 bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
1082 if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
1083 deaths++;
1084 if (GET_CODE (insn) == CALL_INSN)
1085 contains_call = 1;
1087 if (insn == bb->end)
1088 break;
1091 /* And now, if we have found anything, make all live_through
1092 uses conflict with all defs, and update their other members. */
1093 if (deaths > 0 || bitmap_first_set_bit (all_defs) >= 0)
1094 EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id,
1096 struct web_part *wp = &web_parts[df->def_id + use_id];
1097 unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
1098 bitmap conflicts;
1099 wp = find_web_part (wp);
1100 wp->spanned_deaths += deaths;
1101 wp->crosses_call |= contains_call;
1102 conflicts = get_sub_conflicts (wp, bl);
1103 bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
1106 BITMAP_XFREE (all_defs);
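/* Note (added): this is the deferred counterpart of the skip in
   live_in_edge().  A use that was carried over the whole block (recorded
   in info->live_throughout) gets its conflicts with every def of the
   block added in one bitmap IOR instead of insn by insn, and its
   spanned_deaths / crosses_call members are updated wholesale.  */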
1109 /* Allocate the per basic block info for traversing the insn stream for
1110 building live ranges. */
1112 static void
1113 init_bb_info ()
1115 basic_block bb;
1116 FOR_ALL_BB (bb)
1118 struct ra_bb_info *info =
1119 (struct ra_bb_info *) xcalloc (1, sizeof *info);
1120 info->regnos_mentioned = BITMAP_XMALLOC ();
1121 info->live_throughout = BITMAP_XMALLOC ();
1122 info->old_aux = bb->aux;
1123 bb->aux = (void *) info;
1127 /* Free that per basic block info. */
1129 static void
1130 free_bb_info ()
1132 basic_block bb;
1133 FOR_ALL_BB (bb)
1135 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1136 BITMAP_XFREE (info->regnos_mentioned);
1137 BITMAP_XFREE (info->live_throughout);
1138 bb->aux = info->old_aux;
1139 free (info);
1143 /* Toplevel function for the first part of this file.
1144 Connect web parts, thereby implicitly building webs, and remember
1145 their conflicts. */
1147 static void
1148 build_web_parts_and_conflicts (df)
1149 struct df *df;
1151 struct df_link *link;
1152 struct curr_use use;
1153 basic_block bb;
1155 number_seen = (int *) xcalloc (get_max_uid (), sizeof (int));
1156 visit_trace = (struct visit_trace *) xcalloc (get_max_uid (),
1157 sizeof (visit_trace[0]));
1158 update_regnos_mentioned ();
1160 /* Here's the main loop.
1161 It goes through all insns, connects web parts along the way, notes
1162 conflicts between webparts, and remembers move instructions. */
1163 visited_pass = 0;
1164 for (use.regno = 0; use.regno < (unsigned int)max_regno; use.regno++)
1165 if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno])
1166 for (link = df->regs[use.regno].uses; link; link = link->next)
1167 if (link->ref)
1169 struct ref *ref = link->ref;
1170 rtx insn = DF_REF_INSN (ref);
1171 /* Only recheck marked or new uses, or uses from hardregs. */
1172 if (use.regno >= FIRST_PSEUDO_REGISTER
1173 && DF_REF_ID (ref) < last_use_id
1174 && !TEST_BIT (last_check_uses, DF_REF_ID (ref)))
1175 continue;
1176 use.wp = &web_parts[df->def_id + DF_REF_ID (ref)];
1177 use.x = DF_REF_REG (ref);
1178 use.live_over_abnormal = 0;
1179 use.undefined = rtx_to_undefined (use.x);
1180 visited_pass++;
1181 live_in (df, &use, insn);
1182 if (use.live_over_abnormal)
1183 SET_BIT (live_over_abnormal, DF_REF_ID (ref));
1186 dump_number_seen ();
1187 FOR_ALL_BB (bb)
1189 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1190 livethrough_conflicts_bb (bb);
1191 bitmap_zero (info->live_throughout);
1192 info->pass = 0;
1194 free (visit_trace);
1195 free (number_seen);
1198 /* Here we look, per insn, for DF references being in uses _and_ defs.
1199 This means that in the RTL a (REG xx) expression was seen as a
1200 read/modify/write, as happens e.g. for (set (subreg:SI (reg:DI xx)) (...)).
1201 Our code has created two webs for this, as it should. Unfortunately,
1202 as the REG reference occurs only once in the RTL we can't color
1203 both webs differently (arguably this also would be wrong for a real
1204 read-mod-write instruction), so we must reconnect such webs. */
1206 static void
1207 connect_rmw_web_parts (df)
1208 struct df *df;
1210 unsigned int i;
1212 for (i = 0; i < df->use_id; i++)
1214 struct web_part *wp1 = &web_parts[df->def_id + i];
1215 rtx reg;
1216 struct df_link *link;
1217 if (!wp1->ref)
1218 continue;
1219 /* If it's an uninitialized web, we don't want to connect it to others,
1220 as the read cycle in read-mod-write had probably no effect. */
1221 if (find_web_part (wp1) >= &web_parts[df->def_id])
1222 continue;
1223 reg = DF_REF_REAL_REG (wp1->ref);
1224 link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref));
1225 for (; link; link = link->next)
1226 if (reg == DF_REF_REAL_REG (link->ref))
1228 struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)];
1229 union_web_parts (wp1, wp2);
1234 /* Deletes all hardregs from *S which are not allowed for MODE. */
1236 static void
1237 prune_hardregs_for_mode (s, mode)
1238 HARD_REG_SET *s;
1239 enum machine_mode mode;
1241 AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
1244 /* Initialize the members of a web, which are deducible from REG. */
1246 static void
1247 init_one_web_common (web, reg)
1248 struct web *web;
1249 rtx reg;
1251 if (GET_CODE (reg) != REG)
1252 abort ();
1253 /* web->id isn't initialized here. */
1254 web->regno = REGNO (reg);
1255 web->orig_x = reg;
1256 if (!web->dlink)
1258 web->dlink = (struct dlist *) ra_calloc (sizeof (struct dlist));
1259 DLIST_WEB (web->dlink) = web;
1261 /* XXX
1262 the former (superunion) doesn't constrain the graph enough. E.g.
1263 on x86 QImode _requires_ QI_REGS, but as alternate class usually
1264 GENERAL_REGS is given. So the graph is not constrained enough,
1265 thinking it has more freedom than it really has, which leads
1266 to repeated spill tryings. OTOH the latter (only using preferred
1267 class) is too constrained, as normally (e.g. with all SImode
1268 pseudos), they can be allocated also in the alternate class.
1269 What we really want, are the _exact_ hard regs allowed, not
1270 just a class. Later. */
1271 /*web->regclass = reg_class_superunion
1272 [reg_preferred_class (web->regno)]
1273 [reg_alternate_class (web->regno)];*/
1274 /*web->regclass = reg_preferred_class (web->regno);*/
1275 web->regclass = reg_class_subunion
1276 [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];
1277 web->regclass = reg_preferred_class (web->regno);
1278 if (web->regno < FIRST_PSEUDO_REGISTER)
1280 web->color = web->regno;
1281 put_web (web, PRECOLORED);
1282 web->num_conflicts = UINT_MAX;
1283 web->add_hardregs = 0;
1284 CLEAR_HARD_REG_SET (web->usable_regs);
1285 SET_HARD_REG_BIT (web->usable_regs, web->regno);
1286 web->num_freedom = 1;
1288 else
1290 HARD_REG_SET alternate;
1291 web->color = -1;
1292 put_web (web, INITIAL);
1293 /* add_hardregs is wrong in multi-length classes, e.g.
1294 using a DFmode pseudo on x86 can result in class FLOAT_INT_REGS,
1295 where, if it finally is allocated to GENERAL_REGS it needs two,
1296 if allocated to FLOAT_REGS only one hardreg. XXX */
1297 web->add_hardregs =
1298 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
1299 web->num_conflicts = 0 * web->add_hardregs;
1300 COPY_HARD_REG_SET (web->usable_regs,
1301 reg_class_contents[reg_preferred_class (web->regno)]);
1302 COPY_HARD_REG_SET (alternate,
1303 reg_class_contents[reg_alternate_class (web->regno)]);
1304 IOR_HARD_REG_SET (web->usable_regs, alternate);
1305 /*IOR_HARD_REG_SET (web->usable_regs,
1306 reg_class_contents[reg_alternate_class
1307 (web->regno)]);*/
1308 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
1309 prune_hardregs_for_mode (&web->usable_regs,
1310 PSEUDO_REGNO_MODE (web->regno));
1311 #ifdef CLASS_CANNOT_CHANGE_MODE
1312 if (web->mode_changed)
1313 AND_COMPL_HARD_REG_SET (web->usable_regs, reg_class_contents[
1314 (int) CLASS_CANNOT_CHANGE_MODE]);
1315 #endif
1316 web->num_freedom = hard_regs_count (web->usable_regs);
1317 web->num_freedom -= web->add_hardregs;
1318 if (!web->num_freedom)
1319 abort();
1321 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
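/* Note (added): for a pseudo, the usable_regs set computed above is the
   union of the preferred and the alternate register class, minus
   never_use_colors, minus the hardregs unusable for the pseudo's mode
   (prune_hardregs_for_mode), and, when mode_changed is set, minus the
   CLASS_CANNOT_CHANGE_MODE registers.  num_freedom counts what is left
   (less add_hardregs); an empty set here is an internal error, hence the
   abort above.  */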
1324 /* Initialize WEB's members from REG, or zero them. */
1326 static void
1327 init_one_web (web, reg)
1328 struct web *web;
1329 rtx reg;
1331 memset (web, 0, sizeof (struct web));
1332 init_one_web_common (web, reg);
1333 web->useless_conflicts = BITMAP_XMALLOC ();
1336 /* WEB is an old web, meaning it came from the last pass, and got a
1337 color. We want to remember some of its info, so zero only some
1338 members. */
1340 static void
1341 reinit_one_web (web, reg)
1342 struct web *web;
1343 rtx reg;
1345 web->old_color = web->color + 1;
1346 init_one_web_common (web, reg);
1347 web->span_deaths = 0;
1348 web->spill_temp = 0;
1349 web->orig_spill_temp = 0;
1350 web->use_my_regs = 0;
1351 web->spill_cost = 0;
1352 web->was_spilled = 0;
1353 web->is_coalesced = 0;
1354 web->artificial = 0;
1355 web->live_over_abnormal = 0;
1356 web->mode_changed = 0;
1357 web->move_related = 0;
1358 web->in_load = 0;
1359 web->target_of_spilled_move = 0;
1360 web->num_aliased = 0;
1361 if (web->type == PRECOLORED)
1363 web->num_defs = 0;
1364 web->num_uses = 0;
1365 web->orig_spill_cost = 0;
1367 CLEAR_HARD_REG_SET (web->bias_colors);
1368 CLEAR_HARD_REG_SET (web->prefer_colors);
1369 web->reg_rtx = NULL;
1370 web->stack_slot = NULL;
1371 web->pattern = NULL;
1372 web->alias = NULL;
1373 if (web->moves)
1374 abort ();
1375 if (!web->useless_conflicts)
1376 abort ();
1379 /* Insert and return a subweb corresponding to REG into WEB (which
1380 becomes its super web). It must not exist already. */
1382 static struct web *
1383 add_subweb (web, reg)
1384 struct web *web;
1385 rtx reg;
1387 struct web *w;
1388 if (GET_CODE (reg) != SUBREG)
1389 abort ();
1390 w = (struct web *) xmalloc (sizeof (struct web));
1391 /* Copy most content from parent-web. */
1392 *w = *web;
1393 /* And initialize the private stuff. */
1394 w->orig_x = reg;
1395 w->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1;
1396 w->num_conflicts = 0 * w->add_hardregs;
1397 w->num_defs = 0;
1398 w->num_uses = 0;
1399 w->dlink = NULL;
1400 w->parent_web = web;
1401 w->subreg_next = web->subreg_next;
1402 web->subreg_next = w;
1403 return w;
1406 /* Similar to add_subweb(), but instead of relying on a given SUBREG,
1407 we have just a size and an offset of the subpart of the REG rtx.
1408 In contrast to add_subweb() this marks the new subweb as artificial. */
1410 static struct web *
1411 add_subweb_2 (web, size_word)
1412 struct web *web;
1413 unsigned int size_word;
1415 /* To get a correct mode for the to be produced subreg, we don't want to
1416 simply do a mode_for_size() for the mode_class of the whole web.
1417 Suppose we deal with a CDImode web, but search for an 8 byte part.
1418 Now mode_for_size() would only search in the class MODE_COMPLEX_INT
1419 and would find CSImode which probably is not what we want. Instead
1420 we want DImode, which is in a completely other class. For this to work
1421 we instead first search the already existing subwebs, and take
1422 _their_ mode classes as the base for our own search. */
1423 rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x;
1424 unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT;
1425 enum machine_mode mode;
1426 mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
1427 if (mode == BLKmode)
1428 mode = mode_for_size (size, MODE_INT, 0);
1429 if (mode == BLKmode)
1430 abort ();
1431 web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
1432 BYTE_BEGIN (size_word)));
1433 web->artificial = 1;
1434 return web;
1437 /* Initialize all the web parts we are going to need. */
1439 static void
1440 init_web_parts (df)
1441 struct df *df;
1443 int regno;
1444 unsigned int no;
1445 num_webs = 0;
1446 for (no = 0; no < df->def_id; no++)
1448 if (df->defs[no])
1450 if (no < last_def_id && web_parts[no].ref != df->defs[no])
1451 abort ();
1452 web_parts[no].ref = df->defs[no];
1453 /* Uplink might be set from the last iteration. */
1454 if (!web_parts[no].uplink)
1455 num_webs++;
1457 else
1458 /* The last iteration might have left .ref set, while df_analyse()
1459 removed that ref (due to a removed copy insn) from the df->defs[]
1460 array. As we don't check for that in realloc_web_parts()
1461 we do that here. */
1462 web_parts[no].ref = NULL;
1464 for (no = 0; no < df->use_id; no++)
1466 if (df->uses[no])
1468 if (no < last_use_id
1469 && web_parts[no + df->def_id].ref != df->uses[no])
1470 abort ();
1471 web_parts[no + df->def_id].ref = df->uses[no];
1472 if (!web_parts[no + df->def_id].uplink)
1473 num_webs++;
1475 else
1476 web_parts[no + df->def_id].ref = NULL;
1479 /* We want to have only one web for each precolored register. */
1480 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1482 struct web_part *r1 = NULL;
1483 struct df_link *link;
1484 /* Here there once was a test whether there is any DEF at all, and only
1485 then were all the parts merged. This was incorrect; we really want to have
1486 only one web-part for hardregs, even if there is no explicit DEF. */
1487 /* Link together all defs... */
1488 for (link = df->regs[regno].defs; link; link = link->next)
1489 if (link->ref)
1491 struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)];
1492 if (!r1)
1493 r1 = r2;
1494 else
1495 r1 = union_web_parts (r1, r2);
1497 /* ... and all uses. */
1498 for (link = df->regs[regno].uses; link; link = link->next)
1499 if (link->ref)
1501 struct web_part *r2 = &web_parts[df->def_id
1502 + DF_REF_ID (link->ref)];
1503 if (!r1)
1504 r1 = r2;
1505 else
1506 r1 = union_web_parts (r1, r2);
1511 /* In case we want to remember the conflict list of a WEB, before adding
1512 new conflicts, we copy it here to orig_conflict_list. */
1514 static void
1515 copy_conflict_list (web)
1516 struct web *web;
1518 struct conflict_link *cl;
1519 if (web->orig_conflict_list || web->have_orig_conflicts)
1520 abort ();
1521 web->have_orig_conflicts = 1;
1522 for (cl = web->conflict_list; cl; cl = cl->next)
1524 struct conflict_link *ncl;
1525 ncl = (struct conflict_link *) ra_alloc (sizeof *ncl);
1526 ncl->t = cl->t;
1527 ncl->sub = NULL;
1528 ncl->next = web->orig_conflict_list;
1529 web->orig_conflict_list = ncl;
1530 if (cl->sub)
1532 struct sub_conflict *sl, *nsl;
1533 for (sl = cl->sub; sl; sl = sl->next)
1535 nsl = (struct sub_conflict *) ra_alloc (sizeof *nsl);
1536 nsl->s = sl->s;
1537 nsl->t = sl->t;
1538 nsl->next = ncl->sub;
1539 ncl->sub = nsl;
1545 /* Possibly add an edge from web FROM to TO marking a conflict between
1546 those two. This is one half of marking a complete conflict, which notes
1548 in FROM that TO is a conflict. Adding TO to FROM's conflicts might
1549 make other conflicts superfluous, because the current TO overlaps some web
1550 already in conflict with FROM. In this case the smaller webs are
1551 deleted from the conflict list. Likewise if TO is overlapped by a web
1552 already in the list, it isn't added at all. Note that this can only
1553 happen if SUBREG webs are involved. */
1554 static void
1555 add_conflict_edge (from, to)
1556 struct web *from, *to;
1558 if (from->type != PRECOLORED)
1560 struct web *pfrom = find_web_for_subweb (from);
1561 struct web *pto = find_web_for_subweb (to);
1562 struct sub_conflict *sl;
1563 struct conflict_link *cl = pfrom->conflict_list;
1564 int may_delete = 1;
1566 /* This can happen when subwebs of one web conflict with each
1567 other. In live_out_1() we created such conflicts between yet
1568 undefined webparts and defs of parts which didn't overlap with the
1569 undefined bits. Then later they nevertheless could have merged into
1570 one web, and then we land here. */
1571 if (pfrom == pto)
1572 return;
1573 if (remember_conflicts && !pfrom->have_orig_conflicts)
1574 copy_conflict_list (pfrom);
1575 if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
1577 cl = (struct conflict_link *) ra_alloc (sizeof (*cl));
1578 cl->t = pto;
1579 cl->sub = NULL;
1580 cl->next = pfrom->conflict_list;
1581 pfrom->conflict_list = cl;
1582 if (pto->type != SELECT && pto->type != COALESCED)
1583 pfrom->num_conflicts += 1 + pto->add_hardregs;
1584 SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id));
1585 may_delete = 0;
1587 else
1588 /* We don't need to test for cl==NULL, because at this point
1589 a cl with cl->t==pto is guaranteed to exist. */
1590 while (cl->t != pto)
1591 cl = cl->next;
1592 if (pfrom != from || pto != to)
1594 /* This is a subconflict which should be added.
1595 If we inserted cl in this invocation, we really need to add this
1596 subconflict. If we did _not_ add it here, we only add the
1597 subconflict, if cl already had subconflicts, because otherwise
1598 this indicated, that the whole webs already conflict, which
1599 means we are not interested in this subconflict. */
1600 if (!may_delete || cl->sub != NULL)
1602 sl = (struct sub_conflict *) ra_alloc (sizeof (*sl));
1603 sl->s = from;
1604 sl->t = to;
1605 sl->next = cl->sub;
1606 cl->sub = sl;
1609 else
1610 /* pfrom == from && pto == to means, that we are not interested
1611 anymore in the subconflict list for this pair, because anyway
1612 the whole webs conflict. */
1613 cl->sub = NULL;
1617 /* Record a conflict between two webs, if we haven't recorded it
1618 already. */
1620 void
1621 record_conflict (web1, web2)
1622 struct web *web1, *web2;
1624 unsigned int id1 = web1->id, id2 = web2->id;
1625 unsigned int index = igraph_index (id1, id2);
1626 /* Trivial non-conflict or already recorded conflict. */
1627 if (web1 == web2 || TEST_BIT (igraph, index))
1628 return;
1629 if (id1 == id2)
1630 abort ();
1631 /* As fixed_regs are no targets for allocation, conflicts with them
1632 are pointless. */
1633 if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
1634 || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno]))
1635 return;
1636 /* Conflicts with hardregs, which are not even a candidate
1637 for this pseudo are also pointless. */
1638 if ((web1->type == PRECOLORED
1639 && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
1640 || (web2->type == PRECOLORED
1641 && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
1642 return;
1643 /* Similarly if the sets of possible hardregs don't intersect. In this
1644 iteration those conflicts are useless (and would make num_conflicts wrong, because
1645 num_freedom is calculated from the set of possible hardregs).
1646 But in presence of spilling and incremental building of the graph we
1647 need to note all uses of webs conflicting with the spilled ones.
1648 Because the set of possible hardregs can change in the next round for
1649 spilled webs, we possibly have then conflicts with webs which would
1650 be excluded now (because then hardregs intersect). But we actually
1651 need to check those uses, and to get hold of them, we need to remember
1652 also webs conflicting with this one, although not conflicting in this
1653 round because of non-intersecting hardregs. */
1654 if (web1->type != PRECOLORED && web2->type != PRECOLORED
1655 && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs))
1657 struct web *p1 = find_web_for_subweb (web1);
1658 struct web *p2 = find_web_for_subweb (web2);
1659 /* We expect these to be rare enough to justify bitmaps. And because
1660 we have only a special use for it, we note only the superwebs. */
1661 bitmap_set_bit (p1->useless_conflicts, p2->id);
1662 bitmap_set_bit (p2->useless_conflicts, p1->id);
1663 return;
1665 SET_BIT (igraph, index);
1666 add_conflict_edge (web1, web2);
1667 add_conflict_edge (web2, web1);
1670 /* For each web W this produces the missing subwebs Wx, such that it's
1671 possible to exactly specify (W-Wy) for all already existing subwebs Wy. */
1673 static void
1674 build_inverse_webs (web)
1675 struct web *web;
1677 struct web *sweb = web->subreg_next;
1678 unsigned HOST_WIDE_INT undef;
1680 undef = rtx_to_undefined (web->orig_x);
1681 for (; sweb; sweb = sweb->subreg_next)
1682 /* Only create inverses of non-artificial webs. */
1683 if (!sweb->artificial)
1685 unsigned HOST_WIDE_INT bits;
1686 bits = undef & ~ rtx_to_undefined (sweb->orig_x);
1687 while (bits)
1689 unsigned int size_word = undef_to_size_word (web->orig_x, &bits);
1690 if (!find_subweb_2 (web, size_word))
1691 add_subweb_2 (web, size_word);
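/* For illustration (a hypothetical 64-bit pseudo with 32-bit words): if a
   DImode web W already has a subweb Wy covering its low SImode word, the
   loop above adds an artificial subweb for the remaining high word, so
   that conflicts against exactly (W - Wy) can be expressed.  */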
1696 /* Copies the content of WEB to a new one, and links it into WL.
1697 Used for consistency checking. */
1699 static void
1700 copy_web (web, wl)
1701 struct web *web;
1702 struct web_link **wl;
1704 struct web *cweb = (struct web *) xmalloc (sizeof *cweb);
1705 struct web_link *link = (struct web_link *) ra_alloc (sizeof *link);
1706 link->next = *wl;
1707 *wl = link;
1708 link->web = cweb;
1709 *cweb = *web;
1712 /* Given a list of webs LINK, compare the content of the webs therein
1713 with the global webs of the same ID. For consistency checking. */
1715 static void
1716 compare_and_free_webs (link)
1717 struct web_link **link;
1719 struct web_link *wl;
1720 for (wl = *link; wl; wl = wl->next)
1722 struct web *web1 = wl->web;
1723 struct web *web2 = ID2WEB (web1->id);
1724 if (web1->regno != web2->regno
1725 || web1->crosses_call != web2->crosses_call
1726 || web1->live_over_abnormal != web2->live_over_abnormal
1727 || web1->mode_changed != web2->mode_changed
1728 || !rtx_equal_p (web1->orig_x, web2->orig_x)
1729 || web1->type != web2->type
1730 /* Only compare num_defs/num_uses with non-hardreg webs.
1731 E.g. the number of uses of the framepointer changes due to
1732 inserting spill code. */
1733 || (web1->type != PRECOLORED &&
1734 (web1->num_uses != web2->num_uses
1735 || web1->num_defs != web2->num_defs)))
1736 abort ();
1737 if (web1->type != PRECOLORED)
1739 unsigned int i;
1740 for (i = 0; i < web1->num_defs; i++)
1741 if (web1->defs[i] != web2->defs[i])
1742 abort ();
1743 for (i = 0; i < web1->num_uses; i++)
1744 if (web1->uses[i] != web2->uses[i])
1745 abort ();
1747 if (web1->type == PRECOLORED)
1749 if (web1->defs)
1750 free (web1->defs);
1751 if (web1->uses)
1752 free (web1->uses);
1754 free (web1);
1756 *link = NULL;
1759 /* Setup and fill uses[] and defs[] arrays of the webs. */
1761 static void
1762 init_webs_defs_uses ()
1764 struct dlist *d;
1765 for (d = WEBS(INITIAL); d; d = d->next)
1767 struct web *web = DLIST_WEB (d);
1768 unsigned int def_i, use_i;
1769 struct df_link *link;
1770 if (web->old_web)
1771 continue;
1772 if (web->type == PRECOLORED)
1774 web->num_defs = web->num_uses = 0;
1775 continue;
1777 if (web->num_defs)
1778 web->defs = (struct ref **) xmalloc (web->num_defs *
1779 sizeof (web->defs[0]));
1780 if (web->num_uses)
1781 web->uses = (struct ref **) xmalloc (web->num_uses *
1782 sizeof (web->uses[0]));
1783 def_i = use_i = 0;
1784 for (link = web->temp_refs; link; link = link->next)
1786 if (DF_REF_REG_DEF_P (link->ref))
1787 web->defs[def_i++] = link->ref;
1788 else
1789 web->uses[use_i++] = link->ref;
1791 web->temp_refs = NULL;
1792 if (def_i != web->num_defs || use_i != web->num_uses)
1793 abort ();
1797 /* Called by parts_to_webs(). This creates (or recreates) the webs (and
1798 subwebs) from web parts, gives them IDs (only to super webs), and sets
1799 up use2web and def2web arrays. */
1801 static unsigned int
1802 parts_to_webs_1 (df, copy_webs, all_refs)
1803 struct df *df;
1804 struct web_link **copy_webs;
1805 struct df_link *all_refs;
1807 unsigned int i;
1808 unsigned int webnum;
1809 unsigned int def_id = df->def_id;
1810 unsigned int use_id = df->use_id;
1811 struct web_part *wp_first_use = &web_parts[def_id];
1813 /* For each root web part: create and initialize a new web,
1814 setup def2web[] and use2web[] for all defs and uses, and
1815 id2web for all new webs. */
1817 webnum = 0;
1818 for (i = 0; i < def_id + use_id; i++)
1820 struct web *subweb, *web = 0; /* Initialize web to silence warnings. */
1821 struct web_part *wp = &web_parts[i];
1822 struct ref *ref = wp->ref;
1823 unsigned int ref_id;
1824 rtx reg;
1825 if (!ref)
1826 continue;
1827 ref_id = i;
1828 if (i >= def_id)
1829 ref_id -= def_id;
1830 all_refs[i].ref = ref;
1831 reg = DF_REF_REG (ref);
1832 if (! wp->uplink)
1834 /* If we have a web part root, create a new web. */
1835 unsigned int newid = ~(unsigned)0;
1836 unsigned int old_web = 0;
1838 /* In the first pass, there are no old webs, so unconditionally
1839 allocate a new one. */
1840 if (ra_pass == 1)
1842 web = (struct web *) xmalloc (sizeof (struct web));
1843 newid = last_num_webs++;
1844 init_one_web (web, GET_CODE (reg) == SUBREG
1845 ? SUBREG_REG (reg) : reg);
1847 /* Otherwise, we look for an old web. */
1848 else
1850 /* Remember that use2web == def2web + def_id.
1851 Hence def2web[i] == use2web[i - def_id] for i >= def_id.
1852 So we only need to look into the def2web[] array.
1853 Try to look at the web which formerly belonged to this
1854 def (or use). */
1855 web = def2web[i];
1856 /* Or which belonged to this hardreg. */
1857 if (!web && DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER)
1858 web = hardreg2web[DF_REF_REGNO (ref)];
1859 if (web)
1861 /* If we found one, reuse it. */
1862 web = find_web_for_subweb (web);
1863 remove_list (web->dlink, &WEBS(INITIAL));
1864 old_web = 1;
1865 copy_web (web, copy_webs);
1867 else
1869 /* Otherwise use a new one. First from the free list. */
1870 if (WEBS(FREE))
1871 web = DLIST_WEB (pop_list (&WEBS(FREE)));
1872 else
1874 /* Else allocate a new one. */
1875 web = (struct web *) xmalloc (sizeof (struct web));
1876 newid = last_num_webs++;
1879 /* The id is zeroed in init_one_web(). */
1880 if (newid == ~(unsigned)0)
1881 newid = web->id;
1882 if (old_web)
1883 reinit_one_web (web, GET_CODE (reg) == SUBREG
1884 ? SUBREG_REG (reg) : reg);
1885 else
1886 init_one_web (web, GET_CODE (reg) == SUBREG
1887 ? SUBREG_REG (reg) : reg);
1888 web->old_web = (old_web && web->type != PRECOLORED) ? 1 : 0;
1890 web->span_deaths = wp->spanned_deaths;
1891 web->crosses_call = wp->crosses_call;
1892 web->id = newid;
1893 web->temp_refs = NULL;
1894 webnum++;
1895 if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno])
1896 hardreg2web[web->regno] = web;
1897 else if (web->regno < FIRST_PSEUDO_REGISTER
1898 && hardreg2web[web->regno] != web)
1899 abort ();
1902 /* If this reference already had a web assigned, we are done.
1903 This test had better be equivalent to the web being an old web.
1904 Otherwise something is screwed.  (This is tested.) */
1905 if (def2web[i] != NULL)
1907 web = def2web[i];
1908 web = find_web_for_subweb (web);
1909 /* But if this ref includes a mode change, or was a use live
1910 over an abnormal call, set appropriate flags in the web. */
1911 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1912 && web->regno >= FIRST_PSEUDO_REGISTER)
1913 web->mode_changed = 1;
1914 if (i >= def_id
1915 && TEST_BIT (live_over_abnormal, ref_id))
1916 web->live_over_abnormal = 1;
1917 /* And check that it's not a newly allocated web, which would be
1918 an inconsistency. */
1919 if (!web->old_web || web->type == PRECOLORED)
1920 abort ();
1921 continue;
1923 /* In case this was not a web part root, we need to initialize WEB
1924 from the def2web/use2web array entry belonging to the root. */
1925 if (wp->uplink)
1927 struct web_part *rwp = find_web_part (wp);
1928 unsigned int j = DF_REF_ID (rwp->ref);
1929 if (rwp < wp_first_use)
1930 web = def2web[j];
1931 else
1932 web = use2web[j];
1933 web = find_web_for_subweb (web);
1936 /* Remember all references for a web in a single linked list. */
1937 all_refs[i].next = web->temp_refs;
1938 web->temp_refs = &all_refs[i];
1940 /* And test that, if def2web[i] was NULL above, we are _not_
1941 an old web. */
1942 if (web->old_web && web->type != PRECOLORED)
1943 abort ();
1945 /* Possibly create a subweb, if this ref was a subreg. */
1946 if (GET_CODE (reg) == SUBREG)
1948 subweb = find_subweb (web, reg);
1949 if (!subweb)
1951 subweb = add_subweb (web, reg);
1952 if (web->old_web)
1953 abort ();
1956 else
1957 subweb = web;
1959 /* And check whether the ref involves an invalid mode change. */
1960 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1961 && web->regno >= FIRST_PSEUDO_REGISTER)
1962 web->mode_changed = 1;
1964 /* Setup def2web, or use2web, and increment num_defs or num_uses. */
1965 if (i < def_id)
1967 /* Some sanity checks. */
1968 if (ra_pass > 1)
1970 struct web *compare = def2web[i];
1971 if (i < last_def_id)
1973 if (web->old_web && compare != subweb)
1974 abort ();
1976 if (!web->old_web && compare)
1977 abort ();
1978 if (compare && compare != subweb)
1979 abort ();
1981 def2web[i] = subweb;
1982 web->num_defs++;
1984 else
1986 if (ra_pass > 1)
1988 struct web *compare = use2web[ref_id];
1989 if (ref_id < last_use_id)
1991 if (web->old_web && compare != subweb)
1992 abort ();
1994 if (!web->old_web && compare)
1995 abort ();
1996 if (compare && compare != subweb)
1997 abort ();
1999 use2web[ref_id] = subweb;
2000 web->num_uses++;
2001 if (TEST_BIT (live_over_abnormal, ref_id))
2002 web->live_over_abnormal = 1;
2006 /* We had better now have exactly as many webs as we had web part roots. */
2007 if (webnum != num_webs)
2008 abort ();
2010 return webnum;
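/* A worked example of the reference numbering used above, with made-up
   numbers: if df->def_id is 100, then web_parts[0..99] describe defs and
   web_parts[100..] describe uses; reference i == 137 therefore has
   ref_id == 37, and since use2web is simply def2web + def_id, def2web[137]
   and use2web[37] denote the same slot.  */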
2013 /* This builds full webs out of web parts, without relating them to each
2014 other (i.e. without creating the conflict edges). */
2016 static void
2017 parts_to_webs (df)
2018 struct df *df;
2020 unsigned int i;
2021 unsigned int webnum;
2022 struct web_link *copy_webs = NULL;
2023 struct dlist *d;
2024 struct df_link *all_refs;
2025 num_subwebs = 0;
2027 /* First build webs and ordinary subwebs. */
2028 all_refs = (struct df_link *) xcalloc (df->def_id + df->use_id,
2029 sizeof (all_refs[0]));
2030 webnum = parts_to_webs_1 (df, &copy_webs, all_refs);
2032 /* Setup the webs for hardregs which are still missing (weren't
2033 mentioned in the code). */
2034 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2035 if (!hardreg2web[i])
2037 struct web *web = (struct web *) xmalloc (sizeof (struct web));
2038 init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
2039 web->id = last_num_webs++;
2040 hardreg2web[web->regno] = web;
2042 num_webs = last_num_webs;
2044 /* Now create all artificial subwebs, i.e. those which do
2045 not correspond to a real subreg in the current function's RTL, but
2046 which nevertheless are targets of a conflict.
2047 XXX we need to merge this loop with the one above, which means we need
2048 a way to later override the artificiality.  Beware: currently
2049 add_subweb_2() relies on the existence of normal subwebs for deducing
2050 a sane mode to use for the artificial subwebs. */
2051 for (i = 0; i < df->def_id + df->use_id; i++)
2053 struct web_part *wp = &web_parts[i];
2054 struct tagged_conflict *cl;
2055 struct web *web;
2056 if (wp->uplink || !wp->ref)
2058 if (wp->sub_conflicts)
2059 abort ();
2060 continue;
2062 web = def2web[i];
2063 web = find_web_for_subweb (web);
2064 for (cl = wp->sub_conflicts; cl; cl = cl->next)
2065 if (!find_subweb_2 (web, cl->size_word))
2066 add_subweb_2 (web, cl->size_word);
2069 /* And now create artificial subwebs needed for representing the inverse
2070 of some subwebs. This also gives IDs to all subwebs. */
2071 webnum = last_num_webs;
2072 for (d = WEBS(INITIAL); d; d = d->next)
2074 struct web *web = DLIST_WEB (d);
2075 if (web->subreg_next)
2077 struct web *sweb;
2078 build_inverse_webs (web);
2079 for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next)
2080 sweb->id = webnum++;
2084 /* Now that everyone has an ID, we can setup the id2web array. */
2085 id2web = (struct web **) xcalloc (webnum, sizeof (id2web[0]));
2086 for (d = WEBS(INITIAL); d; d = d->next)
2088 struct web *web = DLIST_WEB (d);
2089 ID2WEB (web->id) = web;
2090 for (web = web->subreg_next; web; web = web->subreg_next)
2091 ID2WEB (web->id) = web;
2093 num_subwebs = webnum - last_num_webs;
2094 num_allwebs = num_webs + num_subwebs;
2095 num_webs += num_subwebs;
2097 /* Allocate and clear the conflict graph bitmaps. */
2098 igraph = sbitmap_alloc (num_webs * num_webs / 2);
2099 sup_igraph = sbitmap_alloc (num_webs * num_webs);
2100 sbitmap_zero (igraph);
2101 sbitmap_zero (sup_igraph);
2103 /* Distribute the references to their webs. */
2104 init_webs_defs_uses ();
2105 /* And do some sanity checks that old webs and those recreated from the
2106 web parts really are the same. */
2107 compare_and_free_webs (&copy_webs);
2108 free (all_refs);
2111 /* This deletes all conflicts to and from webs which need to be renewed
2112 in this pass of the allocator, i.e. those which were spilled in the
2113 last pass. Furthermore it also rebuilds the bitmaps for the remaining
2114 conflicts. */
2116 static void
2117 reset_conflicts ()
2119 unsigned int i;
2120 bitmap newwebs = BITMAP_XMALLOC ();
2121 for (i = 0; i < num_webs - num_subwebs; i++)
2123 struct web *web = ID2WEB (i);
2124 /* Hardreg webs and non-old webs are new webs (which
2125 need rebuilding). */
2126 if (web->type == PRECOLORED || !web->old_web)
2127 bitmap_set_bit (newwebs, web->id);
2130 for (i = 0; i < num_webs - num_subwebs; i++)
2132 struct web *web = ID2WEB (i);
2133 struct conflict_link *cl;
2134 struct conflict_link **pcl;
2135 pcl = &(web->conflict_list);
2137 /* First restore the conflict list to be like it was before
2138 coalescing. */
2139 if (web->have_orig_conflicts)
2141 web->conflict_list = web->orig_conflict_list;
2142 web->orig_conflict_list = NULL;
2144 if (web->orig_conflict_list)
2145 abort ();
2147 /* New non-precolored webs have no conflict list. */
2148 if (web->type != PRECOLORED && !web->old_web)
2150 *pcl = NULL;
2151 /* Useless conflicts will be rebuilt completely. But check
2152 for cleanliness, as the web might have come from the
2153 free list. */
2154 if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
2155 abort ();
2157 else
2159 /* Useless conflicts with new webs will be rebuilt if they
2160 are still there. */
2161 bitmap_operation (web->useless_conflicts, web->useless_conflicts,
2162 newwebs, BITMAP_AND_COMPL);
2163 /* Go through all conflicts, and retain those to old webs. */
2164 for (cl = web->conflict_list; cl; cl = cl->next)
2166 if (cl->t->old_web || cl->t->type == PRECOLORED)
2168 *pcl = cl;
2169 pcl = &(cl->next);
2171 /* Also restore the entries in the igraph bitmaps. */
2172 web->num_conflicts += 1 + cl->t->add_hardregs;
2173 SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id));
2174 /* No subconflicts means the full webs conflict. */
2175 if (!cl->sub)
2176 SET_BIT (igraph, igraph_index (web->id, cl->t->id));
2177 else
2178 /* Else only the parts in cl->sub must be in the
2179 bitmap. */
2181 struct sub_conflict *sl;
2182 for (sl = cl->sub; sl; sl = sl->next)
2183 SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2187 *pcl = NULL;
2189 web->have_orig_conflicts = 0;
2191 BITMAP_XFREE (newwebs);
2194 /* For each web check its num_conflicts member against the
2195 number calculated from scratch from all neighbors. */
2197 #if 0
2198 static void
2199 check_conflict_numbers ()
2201 unsigned int i;
2202 for (i = 0; i < num_webs; i++)
2204 struct web *web = ID2WEB (i);
2205 int new_conf = 0 * web->add_hardregs;
2206 struct conflict_link *cl;
2207 for (cl = web->conflict_list; cl; cl = cl->next)
2208 if (cl->t->type != SELECT && cl->t->type != COALESCED)
2209 new_conf += 1 + cl->t->add_hardregs;
2210 if (web->type != PRECOLORED && new_conf != web->num_conflicts)
2211 abort ();
2214 #endif
2216 /* Convert the conflicts between web parts to conflicts between full webs.
2218 This can't be done in parts_to_webs(), because for recording conflicts
2219 between webs we need to know their final usable_regs set, which is used
2220 to discard non-conflicts (between webs having no hard reg in common).
2221 But this is set for spill temporaries only after the webs themselves are
2222 built.  Until then the usable_regs set is based on the pseudo regno used
2223 in this web, which may contain far fewer registers than later determined.
2224 This would result in us losing conflicts (due to record_conflict()
2225 thinking that a web can only be allocated to the current usable_regs,
2226 whereas later this is extended), leading to colorings where some regs
2227 which in reality conflict get the same color. */
2229 static void
2230 conflicts_between_webs (df)
2231 struct df *df;
2233 unsigned int i;
2234 #ifdef STACK_REGS
2235 struct dlist *d;
2236 #endif
2237 bitmap ignore_defs = BITMAP_XMALLOC ();
2238 unsigned int have_ignored;
2239 unsigned int *pass_cache = (unsigned int *) xcalloc (num_webs, sizeof (int));
2240 unsigned int pass = 0;
2242 if (ra_pass > 1)
2243 reset_conflicts ();
2245 /* It is possible that the conflict bitmaps still note some defs I
2246 which have web_parts[I].ref being NULL.  This can happen when the
2247 conflict bitmap for this part wasn't deleted in the last iteration, but a
2248 conflicting move insn was removed.  Its DEF is still in the conflict
2249 bitmap, but it doesn't exist anymore in df->defs.  To avoid checking
2250 for this in the tight loop below, we instead remember their IDs in a
2251 bitmap, and loop only over IDs which are not in it. */
2252 for (i = 0; i < df->def_id; i++)
2253 if (web_parts[i].ref == NULL)
2254 bitmap_set_bit (ignore_defs, i);
2255 have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0);
2257 /* Now record all conflicts between webs.  Note that we only check
2258 the conflict bitmaps of all defs.  Conflict bitmaps are only in
2259 webpart roots.  If they are in uses, those uses are roots, which
2260 means that this is an uninitialized web whose conflicts
2261 don't matter.  Nevertheless for hardregs we also need to check uses.
2262 E.g. hardregs used for argument passing have no DEF in the RTL,
2263 but if they have uses, they indeed conflict with all DEFs they
2264 overlap. */
2265 for (i = 0; i < df->def_id + df->use_id; i++)
2267 struct tagged_conflict *cl = web_parts[i].sub_conflicts;
2268 struct web *supweb1;
2269 if (!cl
2270 || (i >= df->def_id
2271 && DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER))
2272 continue;
2273 supweb1 = def2web[i];
2274 supweb1 = find_web_for_subweb (supweb1);
2275 for (; cl; cl = cl->next)
2276 if (cl->conflicts)
2278 int j;
2279 struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
2280 if (have_ignored)
2281 bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
2282 BITMAP_AND_COMPL);
2283 /* We reduce the number of calls to record_conflict() with this
2284 pass thing. record_conflict() itself also has some early-out
2285 optimizations, but here we can use the special properties of
2286 the loop (constant web1) to reduce that even more.
2287 We once used an sbitmap of already handled web indices,
2288 but sbitmaps are slow to clear and bitmaps are slow to
2289 set/test. The current approach needs more memory, but
2290 locality is large. */
2291 pass++;
2293 /* Note that there are only defs in the conflicts bitset. */
2294 EXECUTE_IF_SET_IN_BITMAP (
2295 cl->conflicts, 0, j,
2297 struct web *web2 = def2web[j];
2298 unsigned int id2 = web2->id;
2299 if (pass_cache[id2] != pass)
2301 pass_cache[id2] = pass;
2302 record_conflict (web1, web2);
2308 free (pass_cache);
2309 BITMAP_XFREE (ignore_defs);
2311 #ifdef STACK_REGS
2312 /* Pseudos can't go in stack regs if they are live at the beginning of
2313 a block that is reached by an abnormal edge. */
2314 for (d = WEBS(INITIAL); d; d = d->next)
2316 struct web *web = DLIST_WEB (d);
2317 int j;
2318 if (web->live_over_abnormal)
2319 for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
2320 record_conflict (web, hardreg2web[j]);
2322 #endif
2325 /* Remember that a web was spilled, and change some characteristics
2326 accordingly. */
2328 static void
2329 remember_web_was_spilled (web)
2330 struct web *web;
2332 int i;
2333 unsigned int found_size = 0;
2334 int adjust;
2335 web->spill_temp = 1;
2337 /* From now on don't use reg_pref/alt_class (regno) anymore for
2338 this web, but instead usable_regs. We can't use spill_temp for
2339 this, as it might get reset later, when we are coalesced to a
2340 non-spill-temp. In that case we still want to use usable_regs. */
2341 web->use_my_regs = 1;
2343 /* We don't constrain spill temporaries in any way for now.
2344 It's sometimes wrong to give them the same constraints or
2345 preferences as the original pseudo, esp. if those were very narrow.
2346 (E.g. there once was a reg wanting class AREG (only one register)
2347 without an alternative class.  As long as the spill temps for
2348 this pseudo also had the same constraints it was spilled over and over.)
2349 Ideally we want some constraints also on spill-temps: because they are
2350 not only loaded/stored, but also worked with, any constraints from insn
2351 alternatives need applying.  Currently this is dealt with by reload, as
2352 are many other things, but at some time we want to integrate that
2353 functionality into the allocator. */
2354 if (web->regno >= max_normal_pseudo)
2356 COPY_HARD_REG_SET (web->usable_regs,
2357 reg_class_contents[reg_preferred_class (web->regno)]);
2358 IOR_HARD_REG_SET (web->usable_regs,
2359 reg_class_contents[reg_alternate_class (web->regno)]);
2361 else
2362 COPY_HARD_REG_SET (web->usable_regs,
2363 reg_class_contents[(int) GENERAL_REGS]);
2364 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
2365 prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
2366 #ifdef CLASS_CANNOT_CHANGE_MODE
2367 if (web->mode_changed)
2368 AND_COMPL_HARD_REG_SET (web->usable_regs, reg_class_contents[
2369 (int) CLASS_CANNOT_CHANGE_MODE]);
2370 #endif
2371 web->num_freedom = hard_regs_count (web->usable_regs);
2372 if (!web->num_freedom)
2373 abort();
2374 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
2375 /* Now look for a class which is a subset of our constraints, to
2376 set up add_hardregs, and regclass for debug output. */
2377 web->regclass = NO_REGS;
2378 for (i = (int) ALL_REGS - 1; i > 0; i--)
2380 unsigned int size;
2381 HARD_REG_SET test;
2382 COPY_HARD_REG_SET (test, reg_class_contents[i]);
2383 AND_COMPL_HARD_REG_SET (test, never_use_colors);
2384 GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found);
2385 continue;
2386 found:
2387 /* Measure the actual number of bits which really are overlapping
2388 the target regset, not just the reg_class_size. */
2389 size = hard_regs_count (test);
2390 if (found_size < size)
2392 web->regclass = (enum reg_class) i;
2393 found_size = size;
2397 adjust = 0 * web->add_hardregs;
2398 web->add_hardregs =
2399 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
2400 web->num_freedom -= web->add_hardregs;
2401 if (!web->num_freedom)
2402 abort();
2403 adjust -= 0 * web->add_hardregs;
2404 web->num_conflicts -= adjust;
2407 /* Look at each web to see if it is used as a spill web, or better said,
2408 if it will be spillable in this pass. */
2410 static void
2411 detect_spill_temps ()
2413 struct dlist *d;
2414 bitmap already = BITMAP_XMALLOC ();
2416 /* Detect webs used for spill temporaries. */
2417 for (d = WEBS(INITIAL); d; d = d->next)
2419 struct web *web = DLIST_WEB (d);
2421 /* Below only the detection of spill temporaries. We never spill
2422 precolored webs, so those can't be spill temporaries. The code above
2423 (remember_web_was_spilled) can't currently cope with hardregs
2424 anyway. */
2425 if (web->regno < FIRST_PSEUDO_REGISTER)
2426 continue;
2427 /* Uninitialized webs can't be spill-temporaries. */
2428 if (web->num_defs == 0)
2429 continue;
2431 /* A web with only defs and no uses can't be spilled.  Nevertheless
2432 it must get a color, as it takes away a register from all webs
2433 live at these defs.  So we make it a short web. */
2434 if (web->num_uses == 0)
2435 web->spill_temp = 3;
2436 /* A web which was spilled last time, but for which no insns were
2437 emitted (this can happen with IR spilling, which sometimes ignores
2438 all deaths). */
2439 else if (web->changed)
2440 web->spill_temp = 1;
2441 /* A spill temporary has one def, one or more uses, all uses
2442 are in one insn, and either the def or use insn was inserted
2443 by the allocator. */
2444 /* XXX not correct currently.  There might also be spill temps
2445 involving more than one def.  Usually that's an additional
2446 clobber in the using instruction.  We might also constrain
2447 ourselves to that, instead of marking all webs involving any
2448 spill insns at all, as we do currently. */
2449 else
2451 unsigned int i;
2452 int spill_involved = 0;
2453 for (i = 0; i < web->num_uses && !spill_involved; i++)
2454 if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid)
2455 spill_involved = 1;
2456 for (i = 0; i < web->num_defs && !spill_involved; i++)
2457 if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid)
2458 spill_involved = 1;
2460 if (spill_involved/* && ra_pass > 2*/)
2462 int num_deaths = web->span_deaths;
2463 /* Mark webs involving at least one spill insn as
2464 spill temps. */
2465 remember_web_was_spilled (web);
2466 /* Search for insns which define and use the web in question
2467 at the same time, i.e. look for rmw insns.  If these insns
2468 are also deaths of other webs they might have been counted
2469 as such into web->span_deaths.  But because of the rmw nature
2470 of these insns they are no point where a load/reload could be
2471 placed successfully (it would still conflict with the
2472 dead web), so reduce the number of spanned deaths by those
2473 insns.  Note that sometimes such deaths are _not_ counted,
2474 so negative values can result. */
2475 bitmap_zero (already);
2476 for (i = 0; i < web->num_defs; i++)
2478 rtx insn = web->defs[i]->insn;
2479 if (TEST_BIT (insns_with_deaths, INSN_UID (insn))
2480 && !bitmap_bit_p (already, INSN_UID (insn)))
2482 unsigned int j;
2483 bitmap_set_bit (already, INSN_UID (insn));
2484 /* Only decrement it once for each insn. */
2485 for (j = 0; j < web->num_uses; j++)
2486 if (web->uses[j]->insn == insn)
2488 num_deaths--;
2489 break;
2493 /* But mark them specially if they could possibly be spilled,
2494 either because they cross some deaths (without the above
2495 mentioned ones) or calls. */
2496 if (web->crosses_call || num_deaths > 0)
2497 web->spill_temp = 1 * 2;
2499 /* A web spanning no deaths can't be spilled either.  No loads
2500 would be created for it, ergo no defs.  So the insns wouldn't
2501 change, making the graph no easier to color.  Make this also
2502 a short web.  Don't do this if it crosses calls, as these are
2503 also points of reloads. */
2504 else if (web->span_deaths == 0 && !web->crosses_call)
2505 web->spill_temp = 3;
2507 web->orig_spill_temp = web->spill_temp;
2509 BITMAP_XFREE (already);
2512 /* Returns nonzero if the rtx MEM refers somehow to a stack location. */
2514 static int
2515 memref_is_stack_slot (mem)
2516 rtx mem;
2518 rtx ad = XEXP (mem, 0);
2519 rtx x;
2520 if (GET_CODE (ad) != PLUS || GET_CODE (XEXP (ad, 1)) != CONST_INT)
2521 return 0;
2522 x = XEXP (ad, 0);
2523 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2524 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
2525 || x == stack_pointer_rtx)
2526 return 1;
2527 return 0;
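/* Illustrative RTL (made-up registers): an address like
   (plus (reg frame_pointer) (const_int -16)) makes the function above
   return 1, whereas (plus (reg 123) (const_int 4)) or a plain
   (symbol_ref "x") address returns 0.  */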
2530 /* Returns nonzero if rtx X somewhere contains any pseudo register. */
2532 static int
2533 contains_pseudo (x)
2534 rtx x;
2536 const char *fmt;
2537 int i;
2538 if (GET_CODE (x) == SUBREG)
2539 x = SUBREG_REG (x);
2540 if (GET_CODE (x) == REG)
2542 if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
2543 return 1;
2544 else
2545 return 0;
2548 fmt = GET_RTX_FORMAT (GET_CODE (x));
2549 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2550 if (fmt[i] == 'e')
2552 if (contains_pseudo (XEXP (x, i)))
2553 return 1;
2555 else if (fmt[i] == 'E')
2557 int j;
2558 for (j = 0; j < XVECLEN (x, i); j++)
2559 if (contains_pseudo (XVECEXP (x, i, j)))
2560 return 1;
2562 return 0;
2565 /* Returns nonzero if we are able to rematerialize something with
2566 value X.  If it's not a general operand, we test if we can produce
2567 a valid insn which sets a pseudo to that value, and that insn doesn't
2568 clobber anything. */
2570 static GTY(()) rtx remat_test_insn;
2571 static int
2572 want_to_remat (x)
2573 rtx x;
2575 int num_clobbers = 0;
2576 int icode;
2578 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
2579 if (general_operand (x, GET_MODE (x)))
2580 return 1;
2582 /* Otherwise, check if we can make a valid insn from it. First initialize
2583 our test insn if we haven't already. */
2584 if (remat_test_insn == 0)
2586 remat_test_insn
2587 = make_insn_raw (gen_rtx_SET (VOIDmode,
2588 gen_rtx_REG (word_mode,
2589 FIRST_PSEUDO_REGISTER * 2),
2590 const0_rtx));
2591 NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0;
2594 /* Now make an insn like the one we would make when rematerializing
2595 the value X and see if it is valid. */
2596 PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x));
2597 SET_SRC (PATTERN (remat_test_insn)) = x;
2598 /* XXX For now we don't allow any clobbers to be added, not just no
2599 hardreg clobbers. */
2600 return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn,
2601 &num_clobbers)) >= 0
2602 && (num_clobbers == 0
2603 /*|| ! added_clobbers_hard_reg_p (icode)*/));
2606 /* Look at all webs to see if they perhaps are rematerializable.
2607 They are if all their defs are simple sets of the same value,
2608 that value is simple enough, and want_to_remat() holds for it. */
2610 static void
2611 detect_remat_webs ()
2613 struct dlist *d;
2614 for (d = WEBS(INITIAL); d; d = d->next)
2616 struct web *web = DLIST_WEB (d);
2617 unsigned int i;
2618 rtx pat = NULL_RTX;
2619 /* Hardregs and useless webs aren't spilled -> no remat necessary.
2620 Defless webs obviously also can't be rematerialized. */
2621 if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs
2622 || !web->num_uses)
2623 continue;
2624 for (i = 0; i < web->num_defs; i++)
2626 rtx insn;
2627 rtx set = single_set (insn = DF_REF_INSN (web->defs[i]));
2628 rtx src;
2629 if (!set)
2630 break;
2631 src = SET_SRC (set);
2632 /* When only subregs of the web are set it isn't easily
2633 rematerializable. */
2634 if (!rtx_equal_p (SET_DEST (set), web->orig_x))
2635 break;
2636 /* If we already have a pattern it must be equal to the current. */
2637 if (pat && !rtx_equal_p (pat, src))
2638 break;
2639 /* Don't do the expensive checks multiple times. */
2640 if (pat)
2641 continue;
2642 /* For now we allow only constant sources. */
2643 if ((CONSTANT_P (src)
2644 /* If the whole thing is stable already, it is a source for
2645 remat, no matter how complicated (probably all needed
2646 resources for it are live everywhere, and don't take
2647 additional register resources). */
2648 /* XXX Currently we can't use patterns which contain
2649 pseudos, _even_ if they are stable. The code simply isn't
2650 prepared for that. All those operands can't be spilled (or
2651 the dependent remat webs are not remat anymore), so they
2652 would be oldwebs in the next iteration. But currently
2653 oldwebs can't have their references changed. The
2654 incremental machinery barfs on that. */
2655 || (!rtx_unstable_p (src) && !contains_pseudo (src))
2656 /* Additionally, memrefs to stack slots are useful when
2657 we created them ourselves.  They might not have their
2658 unchanging flag set, but nevertheless they are stable across
2659 the lifetime in question. */
2660 || (GET_CODE (src) == MEM
2661 && INSN_UID (insn) >= orig_max_uid
2662 && memref_is_stack_slot (src)))
2663 /* And we must be able to construct an insn without
2664 side-effects to actually load that value into a reg. */
2665 && want_to_remat (src))
2666 pat = src;
2667 else
2668 break;
2670 if (pat && i == web->num_defs)
2671 web->pattern = pat;
2675 /* Determine the spill costs of all webs. */
2677 static void
2678 determine_web_costs ()
2680 struct dlist *d;
2681 for (d = WEBS(INITIAL); d; d = d->next)
2683 unsigned int i, num_loads;
2684 int load_cost, store_cost;
2685 unsigned HOST_WIDE_INT w;
2686 struct web *web = DLIST_WEB (d);
2687 if (web->type == PRECOLORED)
2688 continue;
2689 /* Get costs for one load/store.  Note that we offset them by 1,
2690 because some patterns have a zero rtx_cost(), but we of course
2691 still need the actual load/store insns.  With a cost of zero all
2692 those webs would look the same, no matter how often and where
2693 they are used. */
2694 if (web->pattern)
2696 /* This web is rematerializable.  Beware, we set store_cost to
2697 zero, optimistically assuming that we indeed don't emit any
2698 stores when adding the spill code.  This might be wrong if
2699 at the point of the load not all needed resources are
2700 available, in which case we emit a stack-based load, for
2701 which we in turn need the corresponding stores. */
2702 load_cost = 1 + rtx_cost (web->pattern, 0);
2703 store_cost = 0;
2705 else
2707 load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2708 web->regclass, 1);
2709 store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2710 web->regclass, 0);
2712 /* We create only loads at deaths, whose number is in span_deaths. */
2713 num_loads = MIN (web->span_deaths, web->num_uses);
2714 for (w = 0, i = 0; i < web->num_uses; i++)
2715 w += DF_REF_BB (web->uses[i])->frequency + 1;
2716 if (num_loads < web->num_uses)
2717 w = (w * num_loads + web->num_uses - 1) / web->num_uses;
2718 web->spill_cost = w * load_cost;
2719 if (store_cost)
2721 for (w = 0, i = 0; i < web->num_defs; i++)
2722 w += DF_REF_BB (web->defs[i])->frequency + 1;
2723 web->spill_cost += w * store_cost;
2725 web->orig_spill_cost = web->spill_cost;
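/* A worked example with made-up numbers: a non-rematerializable web with
   one def and four uses, each in a block of frequency 3, two spanned
   deaths, load_cost 5 and store_cost 4 gets num_loads = MIN (2, 4) = 2,
   w = 4 * (3 + 1) = 16 for the uses, scaled to (16*2 + 4 - 1) / 4 = 8,
   so spill_cost = 8*5 + (3 + 1)*4 = 56.  */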
2729 /* Detect webs which are set in a conditional jump insn (possibly a
2730 decrement-and-branch type of insn), and mark them as not
2731 spillable.  The stores for them would need to be placed on edges,
2732 which destroys the CFG.  (At some point we want to deal with that.  XXX) */
2734 static void
2735 detect_webs_set_in_cond_jump ()
2737 basic_block bb;
2738 FOR_EACH_BB (bb)
2739 if (GET_CODE (bb->end) == JUMP_INSN)
2741 struct df_link *link;
2742 for (link = DF_INSN_DEFS (df, bb->end); link; link = link->next)
2743 if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
2745 struct web *web = def2web[DF_REF_ID (link->ref)];
2746 web->orig_spill_temp = web->spill_temp = 3;
2751 /* Second top-level function of this file.
2752 Converts the connected web parts to full webs.  This means it allocates
2753 all webs, and initializes all fields, including detecting spill
2754 temporaries.  It does not distribute moves to their corresponding webs,
2755 though. */
2757 static void
2758 make_webs (df)
2759 struct df *df;
2761 /* First build all the webs themselves.  They are not related to
2762 each other yet. */
2763 parts_to_webs (df);
2764 /* Now detect spill temporaries to initialize their usable_regs set. */
2765 detect_spill_temps ();
2766 detect_webs_set_in_cond_jump ();
2767 /* And finally relate them to each other, meaning to record all possible
2768 conflicts between webs (see the comment there). */
2769 conflicts_between_webs (df);
2770 detect_remat_webs ();
2771 determine_web_costs ();
2774 /* Distribute moves to the corresponding webs. */
2776 static void
2777 moves_to_webs (df)
2778 struct df *df;
2780 struct df_link *link;
2781 struct move_list *ml;
2783 /* Distribute all moves to their corresponding webs, making sure
2784 each move is in a web at most once (this happens with some strange
2785 insns). */
2786 for (ml = wl_moves; ml; ml = ml->next)
2788 struct move *m = ml->move;
2789 struct web *web;
2790 struct move_list *newml;
2791 if (!m)
2792 continue;
2793 m->type = WORKLIST;
2794 m->dlink = NULL;
2795 /* Multiple defs/uses can happen in moves involving hard-regs in
2796 a wider mode. For those df.* creates use/def references for each
2797 real hard-reg involved. For coalescing we are interested in
2798 the smallest numbered hard-reg. */
2799 for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next)
2800 if (link->ref)
2802 web = def2web[DF_REF_ID (link->ref)];
2803 web = find_web_for_subweb (web);
2804 if (!m->target_web || web->regno < m->target_web->regno)
2805 m->target_web = web;
2807 for (link = DF_INSN_USES (df, m->insn); link; link = link->next)
2808 if (link->ref)
2810 web = use2web[DF_REF_ID (link->ref)];
2811 web = find_web_for_subweb (web);
2812 if (!m->source_web || web->regno < m->source_web->regno)
2813 m->source_web = web;
2815 if (m->source_web && m->target_web
2816 /* If the usable_regs don't intersect we can't coalesce the two
2817 webs anyway, as this is not a simple copy insn (it might even
2818 need an intermediate stack temp to execute this "copy" insn). */
2819 && hard_regs_intersect_p (&m->source_web->usable_regs,
2820 &m->target_web->usable_regs))
2822 if (!flag_ra_optimistic_coalescing)
2824 struct move_list *test = m->source_web->moves;
2825 for (; test && test->move != m; test = test->next);
2826 if (! test)
2828 newml = (struct move_list*)
2829 ra_alloc (sizeof (struct move_list));
2830 newml->move = m;
2831 newml->next = m->source_web->moves;
2832 m->source_web->moves = newml;
2834 test = m->target_web->moves;
2835 for (; test && test->move != m; test = test->next);
2836 if (! test)
2838 newml = (struct move_list*)
2839 ra_alloc (sizeof (struct move_list));
2840 newml->move = m;
2841 newml->next = m->target_web->moves;
2842 m->target_web->moves = newml;
2846 else
2847 /* Delete this move. */
2848 ml->move = NULL;
2852 /* Handle tricky asm insns.
2853 This is supposed to create conflicts with hardregs which aren't allowed
2854 by the constraints.  It doesn't actually do that, as that might confuse
2855 and constrain the allocator too much. */
2857 static void
2858 handle_asm_insn (df, insn)
2859 struct df *df;
2860 rtx insn;
2862 const char *constraints[MAX_RECOG_OPERANDS];
2863 enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
2864 int i, noperands, in_output;
2865 HARD_REG_SET clobbered, allowed, conflict;
2866 rtx pat;
2867 if (! INSN_P (insn)
2868 || (noperands = asm_noperands (PATTERN (insn))) < 0)
2869 return;
2870 pat = PATTERN (insn);
2871 CLEAR_HARD_REG_SET (clobbered);
2873 if (GET_CODE (pat) == PARALLEL)
2874 for (i = 0; i < XVECLEN (pat, 0); i++)
2876 rtx t = XVECEXP (pat, 0, i);
2877 if (GET_CODE (t) == CLOBBER && GET_CODE (XEXP (t, 0)) == REG
2878 && REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
2879 SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
2882 decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc,
2883 constraints, operand_mode);
2884 in_output = 1;
2885 for (i = 0; i < noperands; i++)
2887 const char *p = constraints[i];
2888 int cls = (int) NO_REGS;
2889 struct df_link *link;
2890 rtx reg;
2891 struct web *web;
2892 int nothing_allowed = 1;
2893 reg = recog_data.operand[i];
2895 /* Check whether the constraints apply to a pseudo reg, and not to
2896 e.g. a mem. */
2897 while (GET_CODE (reg) == SUBREG
2898 || GET_CODE (reg) == ZERO_EXTRACT
2899 || GET_CODE (reg) == SIGN_EXTRACT
2900 || GET_CODE (reg) == STRICT_LOW_PART)
2901 reg = XEXP (reg, 0);
2902 if (GET_CODE (reg) != REG || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2903 continue;
2905 /* Search for the web corresponding to this operand.  We rely on
2906 decode_asm_operands() placing the output operands
2907 before the input operands. */
2908 while (1)
2910 if (in_output)
2911 link = df->insns[INSN_UID (insn)].defs;
2912 else
2913 link = df->insns[INSN_UID (insn)].uses;
2914 while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg)
2915 link = link->next;
2916 if (!link || !link->ref)
2918 if (in_output)
2919 in_output = 0;
2920 else
2921 abort ();
2923 else
2924 break;
2926 if (in_output)
2927 web = def2web[DF_REF_ID (link->ref)];
2928 else
2929 web = use2web[DF_REF_ID (link->ref)];
2930 reg = DF_REF_REG (link->ref);
2932 /* Find the constraints, noting the allowed hardregs in allowed. */
2933 CLEAR_HARD_REG_SET (allowed);
2934 while (1)
2936 char c = *p;
2938 if (c == '\0' || c == ',' || c == '#')
2940 /* End of one alternative - mark the regs in the current
2941 class, and reset the class. */
2942 p++;
2943 IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
2944 if (cls != NO_REGS)
2945 nothing_allowed = 0;
2946 cls = NO_REGS;
2947 if (c == '#')
2948 do {
2949 c = *p++;
2950 } while (c != '\0' && c != ',');
2951 if (c == '\0')
2952 break;
2953 continue;
2956 switch (c)
2958 case '=': case '+': case '*': case '%': case '?': case '!':
2959 case '0': case '1': case '2': case '3': case '4': case 'm':
2960 case '<': case '>': case 'V': case 'o': case '&': case 'E':
2961 case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
2962 case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
2963 case 'P':
2964 break;
2966 case 'p':
2967 cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
2968 nothing_allowed = 0;
2969 break;
2971 case 'g':
2972 case 'r':
2973 cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
2974 nothing_allowed = 0;
2975 break;
2977 default:
2978 cls =
2979 (int) reg_class_subunion[cls][(int)
2980 REG_CLASS_FROM_CONSTRAINT (c,
2981 p)];
2983 p += CONSTRAINT_LEN (c, p);
2986 /* Now make conflicts between this web and all hardregs which
2987 are not allowed by the constraints. */
2988 if (nothing_allowed)
2990 /* If we had no real constraints nothing was explicitly
2991 allowed, so we allow the whole class (i.e. we make no
2992 additional conflicts). */
2993 CLEAR_HARD_REG_SET (conflict);
2995 else
2997 COPY_HARD_REG_SET (conflict, usable_regs
2998 [reg_preferred_class (web->regno)]);
2999 IOR_HARD_REG_SET (conflict, usable_regs
3000 [reg_alternate_class (web->regno)]);
3001 AND_COMPL_HARD_REG_SET (conflict, allowed);
3002 /* We can't yet establish these conflicts.  Reload must go first
3003 (or better said, we must implement some functionality of reload).
3004 E.g. if some operands must match and they need the same color,
3005 we don't yet see that they do not conflict (because they match).
3006 To us it looks like two normal references with different DEFs,
3007 so they conflict, and as they both need the same color, the
3008 graph becomes uncolorable. */
3009 #if 0
3010 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
3011 if (TEST_HARD_REG_BIT (conflict, c))
3012 record_conflict (web, hardreg2web[c]);
3013 #endif
3015 if (rtl_dump_file)
3017 int c;
3018 ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
3019 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
3020 if (TEST_HARD_REG_BIT (conflict, c))
3021 ra_debug_msg (DUMP_ASM, " %d", c);
3022 ra_debug_msg (DUMP_ASM, "\n");
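/* A hypothetical example of the constraint parsing above: for an operand
   constrained by "=r,m" the loop skips '=', lets 'r' select GENERAL_REGS
   for the first alternative and treats 'm' as allowing no further
   registers, so `allowed' ends up as GENERAL_REGS, nothing_allowed is 0,
   and `conflict' becomes the preferred/alternate usable regs minus that
   set (though, as noted, those conflicts are intentionally not recorded;
   see the #if 0 block above).  */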
3027 /* The real toplevel function in this file.
3028 Builds (or rebuilds) the complete interference graph with webs
3029 and conflicts. */
3031 void
3032 build_i_graph (df)
3033 struct df *df;
3035 rtx insn;
3037 init_web_parts (df);
3039 sbitmap_zero (move_handled);
3040 wl_moves = NULL;
3042 build_web_parts_and_conflicts (df);
3044 /* For read-modify-write instructions we may have created two webs.
3045 Reconnect them here. (s.a.) */
3046 connect_rmw_web_parts (df);
3048 /* The webs are conceptually complete now, but still scattered around as
3049 connected web parts.  Collect all information and build the webs,
3050 including all conflicts between webs (instead of between web parts). */
3051 make_webs (df);
3052 moves_to_webs (df);
3054 /* Look for additional constraints given by asms. */
3055 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3056 handle_asm_insn (df, insn);
3059 /* Allocates or reallocates most memory for the interference graph and
3060 associated structures.  If it reallocates memory (meaning this is not
3061 the first pass), this also changes some structures to reflect the
3062 additional entries in various arrays, and the higher number of
3063 defs and uses. */
3065 void
3066 ra_build_realloc (df)
3067 struct df *df;
3069 struct web_part *last_web_parts = web_parts;
3070 struct web **last_def2web = def2web;
3071 struct web **last_use2web = use2web;
3072 sbitmap last_live_over_abnormal = live_over_abnormal;
3073 unsigned int i;
3074 struct dlist *d;
3075 move_handled = sbitmap_alloc (get_max_uid () );
3076 web_parts = (struct web_part *) xcalloc (df->def_id + df->use_id,
3077 sizeof web_parts[0]);
3078 def2web = (struct web **) xcalloc (df->def_id + df->use_id,
3079 sizeof def2web[0]);
3080 use2web = &def2web[df->def_id];
3081 live_over_abnormal = sbitmap_alloc (df->use_id);
3082 sbitmap_zero (live_over_abnormal);
3084 /* First go through all old defs and uses. */
3085 for (i = 0; i < last_def_id + last_use_id; i++)
3087 /* And relocate them to the new array.  This is made ugly by the
3088 fact that defs and uses are placed consecutively into one array. */
3089 struct web_part *dest = &web_parts[i < last_def_id
3090 ? i : (df->def_id + i - last_def_id)];
3091 struct web_part *up;
3092 *dest = last_web_parts[i];
3093 up = dest->uplink;
3094 dest->uplink = NULL;
3096 /* Also relocate the uplink to point into the new array. */
3097 if (up && up->ref)
3099 unsigned int id = DF_REF_ID (up->ref);
3100 if (up < &last_web_parts[last_def_id])
3102 if (df->defs[id])
3103 dest->uplink = &web_parts[DF_REF_ID (up->ref)];
3105 else if (df->uses[id])
3106 dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)];
3110 /* Also set up the def2web and use2web arrays from the last pass.
3111 Also remember the state of live_over_abnormal. */
3112 for (i = 0; i < last_def_id; i++)
3114 struct web *web = last_def2web[i];
3115 if (web)
3117 web = find_web_for_subweb (web);
3118 if (web->type != FREE && web->type != PRECOLORED)
3119 def2web[i] = last_def2web[i];
3122 for (i = 0; i < last_use_id; i++)
3124 struct web *web = last_use2web[i];
3125 if (web)
3127 web = find_web_for_subweb (web);
3128 if (web->type != FREE && web->type != PRECOLORED)
3129 use2web[i] = last_use2web[i];
3131 if (TEST_BIT (last_live_over_abnormal, i))
3132 SET_BIT (live_over_abnormal, i);
3135 /* We don't have any subwebs for now.  At some point we might want to
3136 remember them too, instead of recreating all of them every time.
3137 The problem is that which subwebs we need depends also on what
3138 other webs and subwebs exist, and which conflicts there are.
3139 OTOH it should be no problem if we had some more subwebs than strictly
3140 needed.  Later. */
3141 for (d = WEBS(FREE); d; d = d->next)
3143 struct web *web = DLIST_WEB (d);
3144 struct web *wnext;
3145 for (web = web->subreg_next; web; web = wnext)
3147 wnext = web->subreg_next;
3148 free (web);
3150 DLIST_WEB (d)->subreg_next = NULL;
3153 /* The uses we are going to check anyway are not yet live over an abnormal
3154 edge.  In fact, they might actually not be anymore, due to added
3155 loads. */
3156 if (last_check_uses)
3157 sbitmap_difference (live_over_abnormal, live_over_abnormal,
3158 last_check_uses);
3160 if (last_def_id || last_use_id)
3162 sbitmap_free (last_live_over_abnormal);
3163 free (last_web_parts);
3164 free (last_def2web);
3166 if (!last_max_uid)
3168 /* Setup copy cache, for copy_insn_p (). */
3169 copy_cache = (struct copy_p_cache *)
3170 xcalloc (get_max_uid (), sizeof (copy_cache[0]));
3171 init_bb_info ();
3173 else
3175 copy_cache = (struct copy_p_cache *)
3176 xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
3177 memset (&copy_cache[last_max_uid], 0,
3178 (get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
3182 /* Free up/clear some memory, only needed for one pass. */
3184 void
3185 ra_build_free ()
3187 struct dlist *d;
3188 unsigned int i;
3190 /* Clear the moves associated with a web (we also need to look into
3191 subwebs here). */
3192 for (i = 0; i < num_webs; i++)
3194 struct web *web = ID2WEB (i);
3195 if (!web)
3196 abort ();
3197 if (i >= num_webs - num_subwebs
3198 && (web->conflict_list || web->orig_conflict_list))
3199 abort ();
3200 web->moves = NULL;
3202 /* All webs in the free list have no defs or uses anymore. */
3203 for (d = WEBS(FREE); d; d = d->next)
3205 struct web *web = DLIST_WEB (d);
3206 if (web->defs)
3207 free (web->defs);
3208 web->defs = NULL;
3209 if (web->uses)
3210 free (web->uses);
3211 web->uses = NULL;
3212 /* We can't free the subwebs here, as they are referenced from
3213 def2web[], and possibly needed in the next ra_build_realloc().
3214 We free them there (or in free_all_mem()). */
3217 /* Free all conflict bitmaps from web parts.  Note that we clear
3218 _all_ these conflicts, and don't rebuild them next time for uses
3219 which aren't rechecked.  This means that those conflict bitmaps
3220 only contain the incremental information.  The cumulative one
3221 is still contained in the edges of the I-graph, i.e. in
3222 conflict_list (or orig_conflict_list) of the webs. */
3223 for (i = 0; i < df->def_id + df->use_id; i++)
3225 struct tagged_conflict *cl;
3226 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3228 if (cl->conflicts)
3229 BITMAP_XFREE (cl->conflicts);
3231 web_parts[i].sub_conflicts = NULL;
3234 wl_moves = NULL;
3236 free (id2web);
3237 free (move_handled);
3238 sbitmap_free (sup_igraph);
3239 sbitmap_free (igraph);
3242 /* Free all memory for the interference graph structures. */
3244 void
3245 ra_build_free_all (df)
3246 struct df *df;
3248 unsigned int i;
3250 free_bb_info ();
3251 free (copy_cache);
3252 copy_cache = NULL;
3253 for (i = 0; i < df->def_id + df->use_id; i++)
3255 struct tagged_conflict *cl;
3256 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3258 if (cl->conflicts)
3259 BITMAP_XFREE (cl->conflicts);
3261 web_parts[i].sub_conflicts = NULL;
3263 sbitmap_free (live_over_abnormal);
3264 free (web_parts);
3265 web_parts = NULL;
3266 if (last_check_uses)
3267 sbitmap_free (last_check_uses);
3268 last_check_uses = NULL;
3269 free (def2web);
3270 use2web = NULL;
3271 def2web = NULL;
3274 #include "gt-ra-build.h"
3277 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: