gcc/ra-build.c
1 /* Graph coloring register allocator
2 Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
3 Contributed by Michael Matz <matz@suse.de>
4 and Daniel Berlin <dan@cgsoftware.com>
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify it under the
9 terms of the GNU General Public License as published by the Free Software
10 Foundation; either version 2, or (at your option) any later version.
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
15 details.
17 You should have received a copy of the GNU General Public License along
18 with GCC; see the file COPYING. If not, write to the Free Software
19 Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "insn-config.h"
28 #include "recog.h"
29 #include "reload.h"
30 #include "function.h"
31 #include "regs.h"
32 #include "hard-reg-set.h"
33 #include "basic-block.h"
34 #include "df.h"
35 #include "output.h"
36 #include "ggc.h"
37 #include "ra.h"
39 /* This file is part of the graph coloring register allocator.
40 It deals with building the interference graph. When rebuilding
41 the graph for a function after spilling, we rebuild only those
42 parts needed, i.e. it works incrementally.
44 The first part (the functions called from build_web_parts_and_conflicts()
45 ) constructs a web_part for each pseudo register reference in the insn
46 stream, then goes backward from each use, until it reaches defs for that
47 pseudo. While going back it remembers seen defs for other registers as
48 conflicts. By connecting the uses and defs which reach each other, webs
49 (or live ranges) are conceptually built.
51 The second part (make_webs() and children) deals with converting that
52 structure to the nodes and edges, on which our interference graph is
53 built. For each root web part constructed above, an instance of struct
54 web is created. For all subregs of pseudos, which matter for allocation,
55 a subweb of the corresponding super web is built. Finally all the
56 conflicts noted in the first part (as bitmaps) are transformed into real
57 edges.
59 As part of that process the webs are also classified (their spill cost
60 is calculated, whether they are spillable at all, and if not, for what
61 reason, or whether they are rematerializable), and move insns which are
62 potentially coalescable are collected.
64 The top-level entry of this file (build_i_graph) puts it all together,
65 and leaves us with a complete interference graph, which just has to
66 be colored. */
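/* A purely illustrative sketch (not from the original sources; pseudo
   numbers 100 and 101 are made up): for

     insn 1: (set (reg:SI 100) (const_int 0))                   ; def of 100
     insn 2: (set (reg:SI 101)
                  (plus:SI (reg:SI 100) (const_int 1)))         ; use of 100, def of 101

   the first part creates one web part for the def in insn 1 and one for the
   use in insn 2, walks backward from that use until it reaches the def, and
   unions the two parts; make_webs() then turns the resulting root part into
   a single struct web for pseudo 100 (and a separate one for pseudo 101).  */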
69 struct curr_use;
71 static unsigned HOST_WIDE_INT rtx_to_undefined PARAMS ((rtx));
72 static bitmap find_sub_conflicts PARAMS ((struct web_part *, unsigned int));
73 static bitmap get_sub_conflicts PARAMS ((struct web_part *, unsigned int));
74 static unsigned int undef_to_size_word PARAMS ((rtx, unsigned HOST_WIDE_INT *));
75 static bitmap undef_to_bitmap PARAMS ((struct web_part *,
76 unsigned HOST_WIDE_INT *));
77 static struct web_part * find_web_part_1 PARAMS ((struct web_part *));
78 static struct web_part * union_web_part_roots
79 PARAMS ((struct web_part *, struct web_part *));
80 static int defuse_overlap_p_1 PARAMS ((rtx, struct curr_use *));
81 static int live_out_1 PARAMS ((struct df *, struct curr_use *, rtx));
82 static int live_out PARAMS ((struct df *, struct curr_use *, rtx));
83 static rtx live_in_edge PARAMS (( struct df *, struct curr_use *, edge));
84 static void live_in PARAMS ((struct df *, struct curr_use *, rtx));
85 static int copy_insn_p PARAMS ((rtx, rtx *, rtx *));
86 static void remember_move PARAMS ((rtx));
87 static void handle_asm_insn PARAMS ((struct df *, rtx));
88 static void prune_hardregs_for_mode PARAMS ((HARD_REG_SET *,
89 enum machine_mode));
90 static void init_one_web_common PARAMS ((struct web *, rtx));
91 static void init_one_web PARAMS ((struct web *, rtx));
92 static void reinit_one_web PARAMS ((struct web *, rtx));
93 static struct web * add_subweb PARAMS ((struct web *, rtx));
94 static struct web * add_subweb_2 PARAMS ((struct web *, unsigned int));
95 static void init_web_parts PARAMS ((struct df *));
96 static void copy_conflict_list PARAMS ((struct web *));
97 static void add_conflict_edge PARAMS ((struct web *, struct web *));
98 static void build_inverse_webs PARAMS ((struct web *));
99 static void copy_web PARAMS ((struct web *, struct web_link **));
100 static void compare_and_free_webs PARAMS ((struct web_link **));
101 static void init_webs_defs_uses PARAMS ((void));
102 static unsigned int parts_to_webs_1 PARAMS ((struct df *, struct web_link **,
103 struct df_link *));
104 static void parts_to_webs PARAMS ((struct df *));
105 static void reset_conflicts PARAMS ((void));
106 #if 0
107 static void check_conflict_numbers PARAMS ((void));
108 #endif
109 static void conflicts_between_webs PARAMS ((struct df *));
110 static void remember_web_was_spilled PARAMS ((struct web *));
111 static void detect_spill_temps PARAMS ((void));
112 static int contains_pseudo PARAMS ((rtx));
113 static int want_to_remat PARAMS ((rtx x));
114 static void detect_remat_webs PARAMS ((void));
115 static void determine_web_costs PARAMS ((void));
116 static void detect_webs_set_in_cond_jump PARAMS ((void));
117 static void make_webs PARAMS ((struct df *));
118 static void moves_to_webs PARAMS ((struct df *));
119 static void connect_rmw_web_parts PARAMS ((struct df *));
120 static void update_regnos_mentioned PARAMS ((void));
121 static void livethrough_conflicts_bb PARAMS ((basic_block));
122 static void init_bb_info PARAMS ((void));
123 static void free_bb_info PARAMS ((void));
124 static void build_web_parts_and_conflicts PARAMS ((struct df *));
127 /* A sbitmap of DF_REF_IDs of uses, which are live over an abnormal
128 edge. */
129 static sbitmap live_over_abnormal;
131 /* To cache if we already saw a certain edge while analyzing one
132 use, we use a tick count incremented per use. */
133 static unsigned int visited_pass;
135 /* A sbitmap of UIDs of move insns, which we already analyzed. */
136 static sbitmap move_handled;
138 /* One such structure is allocated per insn, and records, for the currently
139 analyzed use, which web part belongs to it, and how many bytes of
140 it were still undefined when that insn was reached. */
141 struct visit_trace
143 struct web_part *wp;
144 unsigned HOST_WIDE_INT undefined;
146 /* Indexed by UID. */
147 static struct visit_trace *visit_trace;
149 /* Per basic block we have one such structure, used to speed up
150 the backtracing of uses. */
151 struct ra_bb_info
153 /* The value of visited_pass when the first insn of this BB was reached
154 the last time. If this equals the current visited_pass, then
155 undefined is valid. Otherwise not. */
156 unsigned int pass;
157 /* The still undefined bytes at that time. The use to which this is
158 relative is the current use. */
159 unsigned HOST_WIDE_INT undefined;
160 /* Bit regno is set if that regno is mentioned in this BB as a def, or
161 as the source of a copy insn. In these cases we cannot skip the whole
162 block if we reach it from the end. */
163 bitmap regnos_mentioned;
164 /* If a use reaches the end of a BB, and that BB doesn't mention its
165 regno, we skip the block, and remember the ID of that use
166 as living throughout the whole block. */
167 bitmap live_throughout;
168 /* The content of the aux field before placing a pointer to this
169 structure there. */
170 void *old_aux;
173 /* We need a fast way to describe a certain part of a register.
174 Therefore we put together the size and offset (in bytes) in one
175 integer. */
176 #define BL_TO_WORD(b, l) ((((b) & 0xFFFF) << 16) | ((l) & 0xFFFF))
177 #define BYTE_BEGIN(i) (((unsigned int)(i) >> 16) & 0xFFFF)
178 #define BYTE_LENGTH(i) ((unsigned int)(i) & 0xFFFF)
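/* A minimal standalone sketch (not part of the original file; the concrete
   values are arbitrary) showing how the packing above round-trips: begin
   byte 2, length 4.  */
#if 0
static void
bl_to_word_example (void)
{
  unsigned int sw = BL_TO_WORD (2, 4);  /* begin byte 2, length 4 bytes */
  if (BYTE_BEGIN (sw) != 2 || BYTE_LENGTH (sw) != 4)
    abort ();
}
#endif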
180 /* For a REG or SUBREG expression X return the size/offset pair
181 as an integer. */
183 unsigned int
184 rtx_to_bits (x)
185 rtx x;
187 unsigned int len, beg;
188 len = GET_MODE_SIZE (GET_MODE (x));
189 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
190 return BL_TO_WORD (beg, len);
193 /* X is a REG or SUBREG rtx. Return the bytes it touches as a bitmask. */
195 static unsigned HOST_WIDE_INT
196 rtx_to_undefined (x)
197 rtx x;
199 unsigned int len, beg;
200 unsigned HOST_WIDE_INT ret;
201 len = GET_MODE_SIZE (GET_MODE (x));
202 beg = (GET_CODE (x) == SUBREG) ? SUBREG_BYTE (x) : 0;
203 ret = ~ ((unsigned HOST_WIDE_INT) 0);
204 ret = (~(ret << len)) << beg;
205 return ret;
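/* For example (illustrative, on a target where SImode is 4 bytes and DImode
   is 8): for (subreg:SI (reg:DI x) 4) we get len = 4 and beg = 4, so the
   returned mask is (~(~0 << 4)) << 4 == 0xf0, i.e. bytes 4..7 of the DImode
   pseudo are touched.  */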
208 /* We remember if we've analyzed an insn for being a move insn, and if yes
209 between which operands. */
210 struct copy_p_cache
212 int seen;
213 rtx source;
214 rtx target;
217 /* On demand cache, for if insns are copy insns, and if yes, what
218 source/target they have. */
219 static struct copy_p_cache * copy_cache;
221 int *number_seen;
223 /* For INSN, return nonzero if it's a move insn we consider for coalescing
224 later, and place the operands in *SOURCE and *TARGET, if they are
225 not NULL. */
227 static int
228 copy_insn_p (insn, source, target)
229 rtx insn;
230 rtx *source;
231 rtx *target;
233 rtx d, s;
234 unsigned int d_regno, s_regno;
235 int uid = INSN_UID (insn);
237 if (!INSN_P (insn))
238 abort ();
240 /* First look, if we already saw this insn. */
241 if (copy_cache[uid].seen)
243 /* And if we saw it, if it's actually a copy insn. */
244 if (copy_cache[uid].seen == 1)
246 if (source)
247 *source = copy_cache[uid].source;
248 if (target)
249 *target = copy_cache[uid].target;
250 return 1;
252 return 0;
255 /* Mark it as seen, but not being a copy insn. */
256 copy_cache[uid].seen = 2;
257 insn = single_set (insn);
258 if (!insn)
259 return 0;
260 d = SET_DEST (insn);
261 s = SET_SRC (insn);
263 /* We recognize moves between subreg's as copy insns. This is used to avoid
264 conflicts of those subwebs. But they are currently _not_ used for
265 coalescing (the check for this is in remember_move() below). */
266 while (GET_CODE (d) == STRICT_LOW_PART)
267 d = XEXP (d, 0);
268 if (GET_CODE (d) != REG
269 && (GET_CODE (d) != SUBREG || GET_CODE (SUBREG_REG (d)) != REG))
270 return 0;
271 while (GET_CODE (s) == STRICT_LOW_PART)
272 s = XEXP (s, 0);
273 if (GET_CODE (s) != REG
274 && (GET_CODE (s) != SUBREG || GET_CODE (SUBREG_REG (s)) != REG))
275 return 0;
277 s_regno = (unsigned) REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
278 d_regno = (unsigned) REGNO (GET_CODE (d) == SUBREG ? SUBREG_REG (d) : d);
280 /* Copies between hardregs are useless for us, as they aren't coalescable anyway. */
281 if ((s_regno < FIRST_PSEUDO_REGISTER
282 && d_regno < FIRST_PSEUDO_REGISTER)
283 || s_regno >= max_normal_pseudo
284 || d_regno >= max_normal_pseudo)
285 return 0;
287 if (source)
288 *source = s;
289 if (target)
290 *target = d;
292 /* Still mark it as seen, but as a copy insn this time. */
293 copy_cache[uid].seen = 1;
294 copy_cache[uid].source = s;
295 copy_cache[uid].target = d;
296 return 1;
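/* Illustrative examples (pseudos 100 and 101 are made up): a plain
   (set (reg:SI 101) (reg:SI 100)) is recognized as a copy insn, and so is
   (set (subreg:SI (reg:DI 100) 0) (reg:SI 101)) (although such subreg moves
   are later not coalesced, see remember_move()); a move between two hardregs
   is rejected.  */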
299 /* We build webs as we process the conflicts. For each use we walk up
300 the insn stream, noting any defs as potentially conflicting with the
301 current use. We stop at defs of the current regno. The conflicts are only
302 potential, because we may never reach a def, in which case this is an
303 undefined use, which conflicts with nothing. */
306 /* Given a web part WP, and the location of a reg part SIZE_WORD
307 return the conflict bitmap for that reg part, or NULL if it doesn't
308 exist yet in WP. */
310 static bitmap
311 find_sub_conflicts (wp, size_word)
312 struct web_part *wp;
313 unsigned int size_word;
315 struct tagged_conflict *cl;
316 cl = wp->sub_conflicts;
317 for (; cl; cl = cl->next)
318 if (cl->size_word == size_word)
319 return cl->conflicts;
320 return NULL;
323 /* Similar to find_sub_conflicts(), but creates that bitmap, if it
324 doesn't exist. I.e. this never returns NULL. */
326 static bitmap
327 get_sub_conflicts (wp, size_word)
328 struct web_part *wp;
329 unsigned int size_word;
331 bitmap b = find_sub_conflicts (wp, size_word);
332 if (!b)
334 struct tagged_conflict *cl =
335 (struct tagged_conflict *) ra_alloc (sizeof *cl);
336 cl->conflicts = BITMAP_XMALLOC ();
337 cl->size_word = size_word;
338 cl->next = wp->sub_conflicts;
339 wp->sub_conflicts = cl;
340 b = cl->conflicts;
342 return b;
345 /* Helper table for undef_to_size_word() below for small values
346 of UNDEFINED. Offsets and lengths are byte based. */
347 static struct undef_table_s {
348 unsigned int new_undef;
349 /* size | (byte << 16) */
350 unsigned int size_word;
351 } const undef_table [] = {
352 { 0, BL_TO_WORD (0, 0)}, /* 0 */
353 { 0, BL_TO_WORD (0, 1)},
354 { 0, BL_TO_WORD (1, 1)},
355 { 0, BL_TO_WORD (0, 2)},
356 { 0, BL_TO_WORD (2, 1)}, /* 4 */
357 { 1, BL_TO_WORD (2, 1)},
358 { 2, BL_TO_WORD (2, 1)},
359 { 3, BL_TO_WORD (2, 1)},
360 { 0, BL_TO_WORD (3, 1)}, /* 8 */
361 { 1, BL_TO_WORD (3, 1)},
362 { 2, BL_TO_WORD (3, 1)},
363 { 3, BL_TO_WORD (3, 1)},
364 { 0, BL_TO_WORD (2, 2)}, /* 12 */
365 { 1, BL_TO_WORD (2, 2)},
366 { 2, BL_TO_WORD (2, 2)},
367 { 0, BL_TO_WORD (0, 4)}};
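/* For instance (illustrative): undef_table[5] handles *UNDEFINED == 0101b,
   i.e. bytes 0 and 2 undefined; it yields byte 2 with length 1
   (BL_TO_WORD (2, 1)) and leaves 0001b, i.e. byte 0, still undefined.  */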
369 /* Interpret *UNDEFINED as bitmask where each bit corresponds to a byte.
370 A set bit means an undefined byte. Factor all undefined bytes into
371 groups, and return a size/ofs pair of consecutive undefined bytes,
372 but according to certain borders. Clear out those bits corresponding
373 to bytes overlaid by that size/ofs pair. REG is only used for
374 the mode, to detect if it's a floating mode or not.
376 For example: *UNDEFINED   size+ofs   new *UNDEFINED
377                   1111      4+0        0
378                   1100      2+2        0
379                   1101      2+2        1
380                   0001      1+0        0
381                  10101      1+4      101
385 static unsigned int
386 undef_to_size_word (reg, undefined)
387 rtx reg;
388 unsigned HOST_WIDE_INT *undefined;
390 /* When only the lower four bits are possibly set, we use
391 a fast lookup table. */
392 if (*undefined <= 15)
394 struct undef_table_s u;
395 u = undef_table[*undefined];
396 *undefined = u.new_undef;
397 return u.size_word;
400 /* Otherwise we handle certain cases directly. */
401 if (*undefined <= 0xffff)
402 switch ((int) *undefined)
404 case 0x00f0 : *undefined = 0; return BL_TO_WORD (4, 4);
405 case 0x00ff : *undefined = 0; return BL_TO_WORD (0, 8);
406 case 0x0f00 : *undefined = 0; return BL_TO_WORD (8, 4);
407 case 0x0ff0 : *undefined = 0xf0; return BL_TO_WORD (8, 4);
408 case 0x0fff :
409 if (INTEGRAL_MODE_P (GET_MODE (reg)))
410 { *undefined = 0xff; return BL_TO_WORD (8, 4); }
411 else
412 { *undefined = 0; return BL_TO_WORD (0, 12); /* XFmode */ }
413 case 0xf000 : *undefined = 0; return BL_TO_WORD (12, 4);
414 case 0xff00 : *undefined = 0; return BL_TO_WORD (8, 8);
415 case 0xfff0 : *undefined = 0xf0; return BL_TO_WORD (8, 8);
416 case 0xffff : *undefined = 0; return BL_TO_WORD (0, 16);
419 /* And if nothing matched fall back to the general solution. For
420 now unknown undefined bytes are converted to sequences of maximal
421 length 4 bytes. We could make this larger if necessary. */
423 unsigned HOST_WIDE_INT u = *undefined;
424 int word;
425 struct undef_table_s tab;
426 for (word = 0; (u & 15) == 0; word += 4)
427 u >>= 4;
428 u = u & 15;
429 tab = undef_table[u];
430 u = tab.new_undef;
431 u = (*undefined & ~((unsigned HOST_WIDE_INT)15 << word)) | (u << word);
432 *undefined = u;
433 /* Size remains the same, only the begin is moved up by WORD bytes. */
434 return tab.size_word + BL_TO_WORD (word, 0);
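/* An illustrative sketch (not part of the original file) mirroring how the
   callers later in this file, e.g. live_out_1() and build_inverse_webs(),
   use undef_to_size_word(): repeatedly factor an undefined-bytes mask until
   nothing is left.  The start value 0x1f is arbitrary.  */
#if 0
static void
factor_undefined_example (rtx reg)
{
  unsigned HOST_WIDE_INT undef = 0x1f;  /* bytes 0..4 still undefined */
  while (undef)
    {
      unsigned int size_word = undef_to_size_word (reg, &undef);
      /* The first iteration yields BL_TO_WORD (0, 4) and leaves 0x10, the
         second yields BL_TO_WORD (4, 1) and leaves 0, ending the loop.  */
    }
}
#endif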
438 /* Put the above three functions together. For a set of undefined bytes
439 as bitmap *UNDEFINED, look for (create if necessary) and return the
440 corresponding conflict bitmap. Change *UNDEFINED to remove the bytes
441 covered by the part for that bitmap. */
443 static bitmap
444 undef_to_bitmap (wp, undefined)
445 struct web_part *wp;
446 unsigned HOST_WIDE_INT *undefined;
448 unsigned int size_word = undef_to_size_word (DF_REF_REAL_REG (wp->ref),
449 undefined);
450 return get_sub_conflicts (wp, size_word);
453 /* Returns the root of the web part P is a member of. Additionally
454 it compresses the path. P may not be NULL. */
456 static struct web_part *
457 find_web_part_1 (p)
458 struct web_part *p;
460 struct web_part *r = p;
461 struct web_part *p_next;
462 while (r->uplink)
463 r = r->uplink;
464 for (; p != r; p = p_next)
466 p_next = p->uplink;
467 p->uplink = r;
469 return r;
472 /* Fast macro for the common case (WP either being the root itself, or
473 the end of an already compressed path). */
475 #define find_web_part(wp) ((! (wp)->uplink) ? (wp) \
476 : (! (wp)->uplink->uplink) ? (wp)->uplink : find_web_part_1 (wp))
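/* E.g. (illustrative): for an uplink chain p -> a -> b -> r, with r the
   root, find_web_part_1 (p) returns r and rewrites the uplinks of p, a and
   b to point directly at r, so that later lookups hit the fast cases of the
   macro above.  */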
478 /* Unions together the parts R1 resp. R2 is a root of.
479 All dynamic information associated with the parts (number of spanned insns
480 and so on) is also merged.
481 The root of the resulting (possibly larger) web part is returned. */
483 static struct web_part *
484 union_web_part_roots (r1, r2)
485 struct web_part *r1, *r2;
487 if (r1 != r2)
489 /* The new root is the smaller (pointerwise) of both. This is crucial
490 to make the construction of webs from web parts work (so, when
491 scanning all parts, we see the roots before all their children).
492 Additionally this ensures that if the web has a def at all, then
493 the root is a def (because all def parts are before use parts in the
494 web_parts[] array), or put another way, as soon as the root of a
495 web part is not a def, this is an uninitialized web part. The
496 way we construct the I-graph ensures that if a web is initialized,
497 then the first part we find (besides trivial 1 item parts) has a
498 def. */
499 if (r1 > r2)
501 struct web_part *h = r1;
502 r1 = r2;
503 r2 = h;
505 r2->uplink = r1;
506 num_webs--;
508 /* Now we merge the dynamic information of R1 and R2. */
509 r1->spanned_deaths += r2->spanned_deaths;
511 if (!r1->sub_conflicts)
512 r1->sub_conflicts = r2->sub_conflicts;
513 else if (r2->sub_conflicts)
514 /* We need to merge the conflict bitmaps from R2 into R1. */
516 struct tagged_conflict *cl1, *cl2;
517 /* First those from R2, which are also contained in R1.
518 We union the bitmaps, and free those from R2, resetting them
519 to 0. */
520 for (cl1 = r1->sub_conflicts; cl1; cl1 = cl1->next)
521 for (cl2 = r2->sub_conflicts; cl2; cl2 = cl2->next)
522 if (cl1->size_word == cl2->size_word)
524 bitmap_operation (cl1->conflicts, cl1->conflicts,
525 cl2->conflicts, BITMAP_IOR);
526 BITMAP_XFREE (cl2->conflicts);
527 cl2->conflicts = NULL;
529 /* Now the conflict lists from R2 which weren't in R1.
530 We simply copy the entries from R2 into R1's list. */
531 for (cl2 = r2->sub_conflicts; cl2;)
533 struct tagged_conflict *cl_next = cl2->next;
534 if (cl2->conflicts)
536 cl2->next = r1->sub_conflicts;
537 r1->sub_conflicts = cl2;
539 cl2 = cl_next;
542 r2->sub_conflicts = NULL;
543 r1->crosses_call |= r2->crosses_call;
545 return r1;
548 /* Convenience macro, that is capable of unioning also non-roots. */
549 #define union_web_parts(p1, p2) \
550 ((p1 == p2) ? find_web_part (p1) \
551 : union_web_part_roots (find_web_part (p1), find_web_part (p2)))
553 /* Remember that we've handled a given move, so we don't reprocess it. */
555 static void
556 remember_move (insn)
557 rtx insn;
559 if (!TEST_BIT (move_handled, INSN_UID (insn)))
561 rtx s, d;
562 SET_BIT (move_handled, INSN_UID (insn));
563 if (copy_insn_p (insn, &s, &d))
565 /* Some sanity test for the copy insn. */
566 struct df_link *slink = DF_INSN_USES (df, insn);
567 struct df_link *link = DF_INSN_DEFS (df, insn);
568 if (!link || !link->ref || !slink || !slink->ref)
569 abort ();
570 /* The following (link->next != 0) happens when a hardreg
571 is used in wider mode (REG:DI %eax). Then df.* creates
572 a def/use for each hardreg contained therein. We only
573 allow hardregs here. */
574 if (link->next
575 && DF_REF_REGNO (link->next->ref) >= FIRST_PSEUDO_REGISTER)
576 abort ();
578 else
579 abort ();
580 /* XXX for now we don't remember move insns involving any subregs.
581 Those would be difficult to coalesce (we would need to implement
582 handling of all the subwebs in the allocator, including that such
583 subwebs could be source and target of coalescing). */
584 if (GET_CODE (s) == REG && GET_CODE (d) == REG)
586 struct move *m = (struct move *) ra_calloc (sizeof (struct move));
587 struct move_list *ml;
588 m->insn = insn;
589 ml = (struct move_list *) ra_alloc (sizeof (struct move_list));
590 ml->move = m;
591 ml->next = wl_moves;
592 wl_moves = ml;
597 /* This describes the USE currently looked at in the main-loop in
598 build_web_parts_and_conflicts(). */
599 struct curr_use {
600 struct web_part *wp;
601 /* This has a 1-bit for each byte in the USE, which is still undefined. */
602 unsigned HOST_WIDE_INT undefined;
603 /* For easy access. */
604 unsigned int regno;
605 rtx x;
606 /* If some bits of this USE are live over an abnormal edge. */
607 unsigned int live_over_abnormal;
610 /* Returns nonzero iff rtx DEF and USE have bits in common (but see below).
611 It is only called with DEF and USE being (reg:M a) or (subreg:M1 (reg:M2 a)
612 x) rtx's. Furthermore if it's a subreg rtx M1 is at least one word wide,
613 and a is a multi-word pseudo. If DEF or USE are hardregs, they are in
614 word_mode, so we don't need to check for further hardregs which would result
615 from wider references. We are never called with paradoxical subregs.
617 This returns:
618 0 for no common bits,
619 1 if DEF and USE exactly cover the same bytes,
620 2 if the DEF only covers a part of the bits of USE
621 3 if the DEF covers more than the bits of the USE, and
622 4 if both are SUBREG's of different size, but have bytes in common.
623 -1 is a special case, for when DEF and USE refer to the same regno, but
624 have for other reasons no bits in common (can only happen with
625 subregs referring to different words, or to words which already were
626 defined for this USE).
627 Furthermore it modifies use->undefined to clear the bits which get defined
628 by DEF (only for cases with partial overlap).
629 I.e. if the low bit of a result != -1 is set, the USE was completely covered,
630 otherwise a test is needed to track the already defined bytes. */
632 static int
633 defuse_overlap_p_1 (def, use)
634 rtx def;
635 struct curr_use *use;
637 int mode = 0;
638 if (def == use->x)
639 return 1;
640 if (!def)
641 return 0;
642 if (GET_CODE (def) == SUBREG)
644 if (REGNO (SUBREG_REG (def)) != use->regno)
645 return 0;
646 mode |= 1;
648 else if (REGNO (def) != use->regno)
649 return 0;
650 if (GET_CODE (use->x) == SUBREG)
651 mode |= 2;
652 switch (mode)
654 case 0: /* REG, REG */
655 return 1;
656 case 1: /* SUBREG, REG */
658 unsigned HOST_WIDE_INT old_u = use->undefined;
659 use->undefined &= ~ rtx_to_undefined (def);
660 return (old_u != use->undefined) ? 2 : -1;
662 case 2: /* REG, SUBREG */
663 return 3;
664 case 3: /* SUBREG, SUBREG */
665 if (GET_MODE_SIZE (GET_MODE (def)) == GET_MODE_SIZE (GET_MODE (use->x)))
666 /* If the size of both things is the same, the subreg's overlap
667 if they refer to the same word. */
668 if (SUBREG_BYTE (def) == SUBREG_BYTE (use->x))
669 return 1;
670 /* Now the more difficult part: the same regno is referred to, but the
671 sizes of the references or the words differ. E.g.
672 (subreg:SI (reg:CDI a) 0) and (subreg:DI (reg:CDI a) 2) do not
673 overlap, whereas the latter overlaps with (subreg:SI (reg:CDI a) 3).
676 unsigned HOST_WIDE_INT old_u;
677 int b1, e1, b2, e2;
678 unsigned int bl1, bl2;
679 bl1 = rtx_to_bits (def);
680 bl2 = rtx_to_bits (use->x);
681 b1 = BYTE_BEGIN (bl1);
682 b2 = BYTE_BEGIN (bl2);
683 e1 = b1 + BYTE_LENGTH (bl1) - 1;
684 e2 = b2 + BYTE_LENGTH (bl2) - 1;
685 if (b1 > e2 || b2 > e1)
686 return -1;
687 old_u = use->undefined;
688 use->undefined &= ~ rtx_to_undefined (def);
689 return (old_u != use->undefined) ? 4 : -1;
691 default:
692 abort ();
696 /* Macro for the common case of either def and use having the same rtx,
697 or based on different regnos. */
698 #define defuse_overlap_p(def, use) \
699 ((def) == (use)->x ? 1 : \
700 (REGNO (GET_CODE (def) == SUBREG \
701 ? SUBREG_REG (def) : def) != use->regno \
702 ? 0 : defuse_overlap_p_1 (def, use)))
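/* Some illustrative cases (assuming 4-byte SImode and 8-byte DImode; pseudo
   100 is made up), with the return values documented above defuse_overlap_p_1():
     DEF (reg:DI 100),               USE (subreg:SI (reg:DI 100) 0)  -> 3
     DEF (subreg:SI (reg:DI 100) 0), USE (reg:DI 100)                -> 2,
       or -1 if bytes 0..3 of the USE were already defined
     DEF (subreg:SI (reg:DI 100) 4), USE (subreg:SI (reg:DI 100) 0)  -> -1  */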
705 /* The use USE flows into INSN (backwards). Determine INSN's effect on it,
706 and return nonzero, if (parts of) that USE are also live before it.
707 This also notes conflicts between the USE and all DEFS in that insn,
708 and modifies the undefined bits of USE in case parts of it were set in
709 this insn. */
711 static int
712 live_out_1 (df, use, insn)
713 struct df *df ATTRIBUTE_UNUSED;
714 struct curr_use *use;
715 rtx insn;
717 int defined = 0;
718 int uid = INSN_UID (insn);
719 struct web_part *wp = use->wp;
721 /* Mark, that this insn needs this webpart live. */
722 visit_trace[uid].wp = wp;
723 visit_trace[uid].undefined = use->undefined;
725 if (INSN_P (insn))
727 unsigned int source_regno = ~0;
728 unsigned int regno = use->regno;
729 unsigned HOST_WIDE_INT orig_undef = use->undefined;
730 unsigned HOST_WIDE_INT final_undef = use->undefined;
731 rtx s = NULL;
732 unsigned int n, num_defs = insn_df[uid].num_defs;
733 struct ref **defs = insn_df[uid].defs;
735 /* We want to access the root webpart. */
736 wp = find_web_part (wp);
737 if (GET_CODE (insn) == CALL_INSN)
738 wp->crosses_call = 1;
739 else if (copy_insn_p (insn, &s, NULL))
740 source_regno = REGNO (GET_CODE (s) == SUBREG ? SUBREG_REG (s) : s);
742 /* Look at all DEFS in this insn. */
743 for (n = 0; n < num_defs; n++)
745 struct ref *ref = defs[n];
746 int lap;
748 /* Reset the undefined bits for each iteration, in case this
749 insn has more than one set, and one of them sets this regno.
750 But still the original undefined part conflicts with the other
751 sets. */
752 use->undefined = orig_undef;
753 if ((lap = defuse_overlap_p (DF_REF_REG (ref), use)) != 0)
755 if (lap == -1)
756 /* Same regnos but non-overlapping or already defined bits,
757 so ignore this DEF, or rather, make the still undefined
758 part and this DEF conflict. */
760 unsigned HOST_WIDE_INT undef;
761 undef = use->undefined;
762 while (undef)
763 bitmap_set_bit (undef_to_bitmap (wp, &undef),
764 DF_REF_ID (ref));
765 continue;
767 if ((lap & 1) != 0)
768 /* The current DEF completely covers the USE, so we can
769 stop traversing the code looking for further DEFs. */
770 defined = 1;
771 else
772 /* We have a partial overlap. */
774 final_undef &= use->undefined;
775 if (final_undef == 0)
776 /* Now the USE is completely defined, which means, that
777 we can stop looking for former DEFs. */
778 defined = 1;
779 /* If this is a partial overlap, which left some bits
780 in USE undefined, we normally would need to create
781 conflicts between that undefined part and the part of
782 this DEF which overlapped with some of the formerly
783 undefined bits. We don't need to do this, because both
784 parts of this DEF (that which overlaps, and that which
785 doesn't) are written together in this one DEF, and can
786 not be colored in a way which would conflict with
787 the USE. This is only true for partial overlap,
788 because only then the DEF and USE have bits in common,
789 which makes the DEF move, if the USE moves, making them
790 aligned.
791 If they have no bits in common (lap == -1), they are
792 really independent. That is why we made a conflict
793 for them above. */
795 /* This is at least a partial overlap, so we need to union
796 the web parts. */
797 wp = union_web_parts (wp, &web_parts[DF_REF_ID (ref)]);
799 else
801 /* The DEF and the USE don't overlap at all, different
802 regnos. I.e. make conflicts between the undefined bits,
803 and that DEF. */
804 unsigned HOST_WIDE_INT undef = use->undefined;
806 if (regno == source_regno)
807 /* This triggers only, when this was a copy insn and the
808 source is at least a part of the USE currently looked at.
809 In this case only the bits of the USE conflict with the
810 DEF, which are not covered by the source of this copy
811 insn, and which are still undefined. I.e. in the best
812 case (the whole reg being the source), _no_ conflicts
813 between that USE and this DEF (the target of the move)
814 are created by this insn (though they might be by
815 others). This generalizes the normal case of a copy insn
816 between full regs. */
818 undef &= ~ rtx_to_undefined (s);
820 if (undef)
822 /*struct web_part *cwp;
823 cwp = find_web_part (&web_parts[DF_REF_ID
824 (ref)]);*/
826 /* TODO: somehow instead of noting the ID of the LINK
827 use an ID nearer to the root webpart of that LINK.
828 We can't use the root itself, because we later use the
829 ID to look at the form (reg or subreg, and if yes,
830 which subreg) of this conflict. This means, that we
831 need to remember in the root an ID for each form, and
832 maintaining this, when merging web parts. This makes
833 the bitmaps smaller. */
835 bitmap_set_bit (undef_to_bitmap (wp, &undef),
836 DF_REF_ID (ref));
837 while (undef);
841 if (defined)
842 use->undefined = 0;
843 else
845 /* If this insn doesn't completely define the USE, also increment
846 its spanned deaths count (if this insn contains a death). */
847 if (uid >= death_insns_max_uid)
848 abort ();
849 if (TEST_BIT (insns_with_deaths, uid))
850 wp->spanned_deaths++;
851 use->undefined = final_undef;
855 return !defined;
858 /* Same as live_out_1() (actually calls it), but caches some information.
859 E.g. if we already reached this INSN with the current regno, and the
860 current undefined bits are a subset of those from when we came here, we
861 simply connect the web parts of the USE and the one cached for this
862 INSN, and additionally return zero, indicating we don't need to traverse
863 this path any longer (all effects were already seen when we first reached
864 this insn). */
866 static inline int
867 live_out (df, use, insn)
868 struct df *df;
869 struct curr_use *use;
870 rtx insn;
872 unsigned int uid = INSN_UID (insn);
873 if (visit_trace[uid].wp
874 && DF_REF_REGNO (visit_trace[uid].wp->ref) == use->regno
875 && (use->undefined & ~visit_trace[uid].undefined) == 0)
877 union_web_parts (visit_trace[uid].wp, use->wp);
878 /* Don't search any further, as we already were here with this regno. */
879 return 0;
881 else
882 return live_out_1 (df, use, insn);
885 /* The current USE reached a basic block head. The edge E is one
886 of the predecessors edges. This evaluates the effect of the predecessor
887 block onto the USE, and returns the next insn, which should be looked at.
888 This either is the last insn of that pred. block, or the first one.
889 The latter happens, when the pred. block has no possible effect on the
890 USE, except for conflicts. In that case, it's remembered, that the USE
891 is live over that whole block, and it's skipped. Otherwise we simply
892 continue with the last insn of the block.
894 This also determines the effects of abnormal edges, and remembers
895 which uses are live at the end of that basic block. */
897 static rtx
898 live_in_edge (df, use, e)
899 struct df *df;
900 struct curr_use *use;
901 edge e;
903 struct ra_bb_info *info_pred;
904 rtx next_insn;
905 /* Call used hard regs die over an exception edge, ergo
906 they don't reach the predecessor block, so ignore such
907 uses. And also don't set the live_over_abnormal flag
908 for them. */
909 if ((e->flags & EDGE_EH) && use->regno < FIRST_PSEUDO_REGISTER
910 && call_used_regs[use->regno])
911 return NULL_RTX;
912 if (e->flags & EDGE_ABNORMAL)
913 use->live_over_abnormal = 1;
914 bitmap_set_bit (live_at_end[e->src->index], DF_REF_ID (use->wp->ref));
915 info_pred = (struct ra_bb_info *) e->src->aux;
916 next_insn = e->src->end;
918 /* If the last insn of the pred. block doesn't completely define the
919 current use, we need to check the block. */
920 if (live_out (df, use, next_insn))
922 /* If the current regno isn't mentioned anywhere in the whole block,
923 and the complete use is still undefined... */
924 if (!bitmap_bit_p (info_pred->regnos_mentioned, use->regno)
925 && (rtx_to_undefined (use->x) & ~use->undefined) == 0)
927 /* ...we can hop over the whole block and defer conflict
928 creation to later. */
929 bitmap_set_bit (info_pred->live_throughout,
930 DF_REF_ID (use->wp->ref));
931 next_insn = e->src->head;
933 return next_insn;
935 else
936 return NULL_RTX;
939 /* USE flows into the end of the insns preceding INSN. Determine
940 their effects (in live_out()) and possibly loop over the preceding INSN,
941 or call itself recursively on a basic block border. When a toplevel
942 call of this function returns the USE is completely analyzed. I.e.
943 its def-use chain (at least) is built, possibly connected with other
944 def-use chains, and all defs during that chain are noted. */
946 static void
947 live_in (df, use, insn)
948 struct df *df;
949 struct curr_use *use;
950 rtx insn;
952 unsigned int loc_vpass = visited_pass;
954 /* Note that, even _if_ we are called with use->wp being a root-part, it might
955 become a non-root in the loop below (due to live_out() unioning
956 it). So beware not to change use->wp in a way for which only root-webs
957 are allowed. */
958 while (1)
960 int uid = INSN_UID (insn);
961 basic_block bb = BLOCK_FOR_INSN (insn);
962 number_seen[uid]++;
964 /* We want to be as fast as possible, so explicitly write
965 this loop. */
966 for (insn = PREV_INSN (insn); insn && !INSN_P (insn);
967 insn = PREV_INSN (insn))
969 if (!insn)
970 return;
971 if (bb != BLOCK_FOR_INSN (insn))
973 edge e;
974 unsigned HOST_WIDE_INT undef = use->undefined;
975 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
976 if ((e = bb->pred) == NULL)
977 return;
978 /* We now check, if we already traversed the predecessors of this
979 block for the current pass and the current set of undefined
980 bits. If yes, we don't need to check the predecessors again.
981 So, conceptually this information is tagged to the first
982 insn of a basic block. */
983 if (info->pass == loc_vpass && (undef & ~info->undefined) == 0)
984 return;
985 info->pass = loc_vpass;
986 info->undefined = undef;
987 /* All but the last predecessor are handled recursively. */
988 for (; e->pred_next; e = e->pred_next)
990 insn = live_in_edge (df, use, e);
991 if (insn)
992 live_in (df, use, insn);
993 use->undefined = undef;
995 insn = live_in_edge (df, use, e);
996 if (!insn)
997 return;
999 else if (!live_out (df, use, insn))
1000 return;
1004 /* Determine all regnos which are mentioned in a basic block, in an
1005 interesting way. Interesting here means either in a def, or as the
1006 source of a move insn. We only look at insns added since the last
1007 pass. */
1009 static void
1010 update_regnos_mentioned ()
1012 int last_uid = last_max_uid;
1013 rtx insn;
1014 basic_block bb;
1015 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
1016 if (INSN_P (insn))
1018 /* Don't look at old insns. */
1019 if (INSN_UID (insn) < last_uid)
1021 /* XXX We should also remember moves over iterations (we already
1022 save the cache, but not the movelist). */
1023 if (copy_insn_p (insn, NULL, NULL))
1024 remember_move (insn);
1026 else if ((bb = BLOCK_FOR_INSN (insn)) != NULL)
1028 rtx source;
1029 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1030 bitmap mentioned = info->regnos_mentioned;
1031 struct df_link *link;
1032 if (copy_insn_p (insn, &source, NULL))
1034 remember_move (insn);
1035 bitmap_set_bit (mentioned,
1036 REGNO (GET_CODE (source) == SUBREG
1037 ? SUBREG_REG (source) : source));
1039 for (link = DF_INSN_DEFS (df, insn); link; link = link->next)
1040 if (link->ref)
1041 bitmap_set_bit (mentioned, DF_REF_REGNO (link->ref));
1046 /* Handle the uses which reach a block end, but were deferred due
1047 to their regno not being mentioned in that block. This adds the
1048 remaining conflicts and also updates the crosses_call and
1049 spanned_deaths members. */
1051 static void
1052 livethrough_conflicts_bb (bb)
1053 basic_block bb;
1055 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1056 rtx insn;
1057 bitmap all_defs;
1058 int first, use_id;
1059 unsigned int deaths = 0;
1060 unsigned int contains_call = 0;
1062 /* If there are no deferred uses, just return. */
1063 if ((first = bitmap_first_set_bit (info->live_throughout)) < 0)
1064 return;
1066 /* First collect the IDs of all defs, count the number of death-
1067 containing insns, and note whether there's some call_insn here. */
1068 all_defs = BITMAP_XMALLOC ();
1069 for (insn = bb->head; insn; insn = NEXT_INSN (insn))
1071 if (INSN_P (insn))
1073 unsigned int n;
1074 struct ra_insn_info info;
1076 info = insn_df[INSN_UID (insn)];
1077 for (n = 0; n < info.num_defs; n++)
1078 bitmap_set_bit (all_defs, DF_REF_ID (info.defs[n]));
1079 if (TEST_BIT (insns_with_deaths, INSN_UID (insn)))
1080 deaths++;
1081 if (GET_CODE (insn) == CALL_INSN)
1082 contains_call = 1;
1084 if (insn == bb->end)
1085 break;
1088 /* And now, if we have found anything, make all live_through
1089 uses conflict with all defs, and update their other members. */
1090 if (deaths > 0 || bitmap_first_set_bit (all_defs) >= 0)
1091 EXECUTE_IF_SET_IN_BITMAP (info->live_throughout, first, use_id,
1093 struct web_part *wp = &web_parts[df->def_id + use_id];
1094 unsigned int bl = rtx_to_bits (DF_REF_REG (wp->ref));
1095 bitmap conflicts;
1096 wp = find_web_part (wp);
1097 wp->spanned_deaths += deaths;
1098 wp->crosses_call |= contains_call;
1099 conflicts = get_sub_conflicts (wp, bl);
1100 bitmap_operation (conflicts, conflicts, all_defs, BITMAP_IOR);
1103 BITMAP_XFREE (all_defs);
1106 /* Allocate the per basic block info for traversing the insn stream for
1107 building live ranges. */
1109 static void
1110 init_bb_info ()
1112 basic_block bb;
1113 FOR_ALL_BB (bb)
1115 struct ra_bb_info *info =
1116 (struct ra_bb_info *) xcalloc (1, sizeof *info);
1117 info->regnos_mentioned = BITMAP_XMALLOC ();
1118 info->live_throughout = BITMAP_XMALLOC ();
1119 info->old_aux = bb->aux;
1120 bb->aux = (void *) info;
1124 /* Free that per basic block info. */
1126 static void
1127 free_bb_info ()
1129 basic_block bb;
1130 FOR_ALL_BB (bb)
1132 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1133 BITMAP_XFREE (info->regnos_mentioned);
1134 BITMAP_XFREE (info->live_throughout);
1135 bb->aux = info->old_aux;
1136 free (info);
1140 /* Toplevel function for the first part of this file.
1141 Connect web parts, thereby implicitly building webs, and remember
1142 their conflicts. */
1144 static void
1145 build_web_parts_and_conflicts (df)
1146 struct df *df;
1148 struct df_link *link;
1149 struct curr_use use;
1150 basic_block bb;
1152 number_seen = (int *) xcalloc (get_max_uid (), sizeof (int));
1153 visit_trace = (struct visit_trace *) xcalloc (get_max_uid (),
1154 sizeof (visit_trace[0]));
1155 update_regnos_mentioned ();
1157 /* Here's the main loop.
1158 It goes through all insn's, connects web parts along the way, notes
1159 conflicts between webparts, and remembers move instructions. */
1160 visited_pass = 0;
1161 for (use.regno = 0; use.regno < (unsigned int)max_regno; use.regno++)
1162 if (use.regno >= FIRST_PSEUDO_REGISTER || !fixed_regs[use.regno])
1163 for (link = df->regs[use.regno].uses; link; link = link->next)
1164 if (link->ref)
1166 struct ref *ref = link->ref;
1167 rtx insn = DF_REF_INSN (ref);
1168 /* Only recheck marked or new uses, or uses from hardregs. */
1169 if (use.regno >= FIRST_PSEUDO_REGISTER
1170 && DF_REF_ID (ref) < last_use_id
1171 && !TEST_BIT (last_check_uses, DF_REF_ID (ref)))
1172 continue;
1173 use.wp = &web_parts[df->def_id + DF_REF_ID (ref)];
1174 use.x = DF_REF_REG (ref);
1175 use.live_over_abnormal = 0;
1176 use.undefined = rtx_to_undefined (use.x);
1177 visited_pass++;
1178 live_in (df, &use, insn);
1179 if (use.live_over_abnormal)
1180 SET_BIT (live_over_abnormal, DF_REF_ID (ref));
1183 dump_number_seen ();
1184 FOR_ALL_BB (bb)
1186 struct ra_bb_info *info = (struct ra_bb_info *) bb->aux;
1187 livethrough_conflicts_bb (bb);
1188 bitmap_zero (info->live_throughout);
1189 info->pass = 0;
1191 free (visit_trace);
1192 free (number_seen);
1195 /* Here we look per insn, for DF references being in uses _and_ defs.
1196 This means, in the RTL a (REG xx) expression was seen as a
1197 read/modify/write, as happens e.g. for (set (subreg:SI (reg:DI xx)) (...)).
1198 Our code has created two webs for this, as it should. Unfortunately,
1199 as the REG reference occurs only once in the RTL we can't color
1200 the two webs differently (arguably this also would be wrong for a real
1201 read-mod-write instruction), so we must reconnect such webs. */
1203 static void
1204 connect_rmw_web_parts (df)
1205 struct df *df;
1207 unsigned int i;
1209 for (i = 0; i < df->use_id; i++)
1211 struct web_part *wp1 = &web_parts[df->def_id + i];
1212 rtx reg;
1213 struct df_link *link;
1214 if (!wp1->ref)
1215 continue;
1216 /* If it's an uninitialized web, we don't want to connect it to others,
1217 as the read cycle in read-mod-write had probably no effect. */
1218 if (find_web_part (wp1) >= &web_parts[df->def_id])
1219 continue;
1220 reg = DF_REF_REAL_REG (wp1->ref);
1221 link = DF_INSN_DEFS (df, DF_REF_INSN (wp1->ref));
1222 for (; link; link = link->next)
1223 if (reg == DF_REF_REAL_REG (link->ref))
1225 struct web_part *wp2 = &web_parts[DF_REF_ID (link->ref)];
1226 union_web_parts (wp1, wp2);
1231 /* Deletes all hardregs from *S which are not allowed for MODE. */
1233 static void
1234 prune_hardregs_for_mode (s, mode)
1235 HARD_REG_SET *s;
1236 enum machine_mode mode;
1238 AND_HARD_REG_SET (*s, hardregs_for_mode[(int) mode]);
1241 /* Initialize the members of a web, which are deducible from REG. */
1243 static void
1244 init_one_web_common (web, reg)
1245 struct web *web;
1246 rtx reg;
1248 if (GET_CODE (reg) != REG)
1249 abort ();
1250 /* web->id isn't initialized here. */
1251 web->regno = REGNO (reg);
1252 web->orig_x = reg;
1253 if (!web->dlink)
1255 web->dlink = (struct dlist *) ra_calloc (sizeof (struct dlist));
1256 DLIST_WEB (web->dlink) = web;
1258 /* XXX
1259 the former (superunion) doesn't constrain the graph enough. E.g.
1260 on x86 QImode _requires_ QI_REGS, but as alternate class usually
1261 GENERAL_REGS is given. So the graph is not constrained enough,
1262 thinking it has more freedom than it really has, which leads
1263 to repeated spill tryings. OTOH the latter (only using preferred
1264 class) is too constrained, as normally (e.g. with all SImode
1265 pseudos), they can be allocated also in the alternate class.
1266 What we really want, are the _exact_ hard regs allowed, not
1267 just a class. Later. */
1268 /*web->regclass = reg_class_superunion
1269 [reg_preferred_class (web->regno)]
1270 [reg_alternate_class (web->regno)];*/
1271 /*web->regclass = reg_preferred_class (web->regno);*/
1272 web->regclass = reg_class_subunion
1273 [reg_preferred_class (web->regno)] [reg_alternate_class (web->regno)];
1274 web->regclass = reg_preferred_class (web->regno);
1275 if (web->regno < FIRST_PSEUDO_REGISTER)
1277 web->color = web->regno;
1278 put_web (web, PRECOLORED);
1279 web->num_conflicts = UINT_MAX;
1280 web->add_hardregs = 0;
1281 CLEAR_HARD_REG_SET (web->usable_regs);
1282 SET_HARD_REG_BIT (web->usable_regs, web->regno);
1283 web->num_freedom = 1;
1285 else
1287 HARD_REG_SET alternate;
1288 web->color = -1;
1289 put_web (web, INITIAL);
1290 /* add_hardregs is wrong in multi-length classes, e.g.
1291 using a DFmode pseudo on x86 can result in class FLOAT_INT_REGS,
1292 where, if it finally is allocated to GENERAL_REGS it needs two,
1293 if allocated to FLOAT_REGS only one hardreg. XXX */
1294 web->add_hardregs =
1295 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
1296 web->num_conflicts = 0 * web->add_hardregs;
1297 COPY_HARD_REG_SET (web->usable_regs,
1298 reg_class_contents[reg_preferred_class (web->regno)]);
1299 COPY_HARD_REG_SET (alternate,
1300 reg_class_contents[reg_alternate_class (web->regno)]);
1301 IOR_HARD_REG_SET (web->usable_regs, alternate);
1302 /*IOR_HARD_REG_SET (web->usable_regs,
1303 reg_class_contents[reg_alternate_class
1304 (web->regno)]);*/
1305 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
1306 prune_hardregs_for_mode (&web->usable_regs,
1307 PSEUDO_REGNO_MODE (web->regno));
1308 #ifdef CANNOT_CHANGE_MODE_CLASS
1309 if (web->mode_changed)
1310 AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
1311 #endif
1312 web->num_freedom = hard_regs_count (web->usable_regs);
1313 web->num_freedom -= web->add_hardregs;
1314 if (!web->num_freedom)
1315 abort();
1317 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
1320 /* Initializes WEB's members from REG or zeroes them. */
1322 static void
1323 init_one_web (web, reg)
1324 struct web *web;
1325 rtx reg;
1327 memset (web, 0, sizeof (struct web));
1328 init_one_web_common (web, reg);
1329 web->useless_conflicts = BITMAP_XMALLOC ();
1332 /* WEB is an old web, meaning it came from the last pass, and got a
1333 color. We want to remember some of its info, so zero only some
1334 members. */
1336 static void
1337 reinit_one_web (web, reg)
1338 struct web *web;
1339 rtx reg;
1341 web->old_color = web->color + 1;
1342 init_one_web_common (web, reg);
1343 web->span_deaths = 0;
1344 web->spill_temp = 0;
1345 web->orig_spill_temp = 0;
1346 web->use_my_regs = 0;
1347 web->spill_cost = 0;
1348 web->was_spilled = 0;
1349 web->is_coalesced = 0;
1350 web->artificial = 0;
1351 web->live_over_abnormal = 0;
1352 web->mode_changed = 0;
1353 web->subreg_stripped = 0;
1354 web->move_related = 0;
1355 web->in_load = 0;
1356 web->target_of_spilled_move = 0;
1357 web->num_aliased = 0;
1358 if (web->type == PRECOLORED)
1360 web->num_defs = 0;
1361 web->num_uses = 0;
1362 web->orig_spill_cost = 0;
1364 CLEAR_HARD_REG_SET (web->bias_colors);
1365 CLEAR_HARD_REG_SET (web->prefer_colors);
1366 web->reg_rtx = NULL;
1367 web->stack_slot = NULL;
1368 web->pattern = NULL;
1369 web->alias = NULL;
1370 if (web->moves)
1371 abort ();
1372 if (!web->useless_conflicts)
1373 abort ();
1376 /* Inserts and returns a subweb corresponding to REG into WEB (which
1377 becomes its super web). It must not exist already. */
1379 static struct web *
1380 add_subweb (web, reg)
1381 struct web *web;
1382 rtx reg;
1384 struct web *w;
1385 if (GET_CODE (reg) != SUBREG)
1386 abort ();
1387 w = (struct web *) xmalloc (sizeof (struct web));
1388 /* Copy most content from parent-web. */
1389 *w = *web;
1390 /* And initialize the private stuff. */
1391 w->orig_x = reg;
1392 w->add_hardregs = CLASS_MAX_NREGS (web->regclass, GET_MODE (reg)) - 1;
1393 w->num_conflicts = 0 * w->add_hardregs;
1394 w->num_defs = 0;
1395 w->num_uses = 0;
1396 w->dlink = NULL;
1397 w->parent_web = web;
1398 w->subreg_next = web->subreg_next;
1399 web->subreg_next = w;
1400 return w;
1403 /* Similar to add_subweb(), but instead of relying on a given SUBREG,
1404 we have just a size and an offset of the subpart of the REG rtx.
1405 In contrast to add_subweb() this marks the new subweb as artificial. */
1407 static struct web *
1408 add_subweb_2 (web, size_word)
1409 struct web *web;
1410 unsigned int size_word;
1412 /* To get a correct mode for the subreg to be produced, we don't want to
1413 simply do a mode_for_size() for the mode_class of the whole web.
1414 Suppose we deal with a CDImode web, but search for an 8 byte part.
1415 Now mode_for_size() would only search in the class MODE_COMPLEX_INT
1416 and would find CSImode which probably is not what we want. Instead
1417 we want DImode, which is in a completely different class. For this to work
1418 we instead first search the already existing subwebs, and take
1419 _their_ mode classes as the base for a search for ourselves. */
1420 rtx ref_rtx = (web->subreg_next ? web->subreg_next : web)->orig_x;
1421 unsigned int size = BYTE_LENGTH (size_word) * BITS_PER_UNIT;
1422 enum machine_mode mode;
1423 mode = mode_for_size (size, GET_MODE_CLASS (GET_MODE (ref_rtx)), 0);
1424 if (mode == BLKmode)
1425 mode = mode_for_size (size, MODE_INT, 0);
1426 if (mode == BLKmode)
1427 abort ();
1428 web = add_subweb (web, gen_rtx_SUBREG (mode, web->orig_x,
1429 BYTE_BEGIN (size_word)));
1430 web->artificial = 1;
1431 return web;
1434 /* Initialize all the web parts we are going to need. */
1436 static void
1437 init_web_parts (df)
1438 struct df *df;
1440 int regno;
1441 unsigned int no;
1442 num_webs = 0;
1443 for (no = 0; no < df->def_id; no++)
1445 if (df->defs[no])
1447 if (no < last_def_id && web_parts[no].ref != df->defs[no])
1448 abort ();
1449 web_parts[no].ref = df->defs[no];
1450 /* Uplink might be set from the last iteration. */
1451 if (!web_parts[no].uplink)
1452 num_webs++;
1454 else
1455 /* The last iteration might have left .ref set, while df_analyse()
1456 removed that ref (due to a removed copy insn) from the df->defs[]
1457 array. As we don't check for that in realloc_web_parts()
1458 we do that here. */
1459 web_parts[no].ref = NULL;
1461 for (no = 0; no < df->use_id; no++)
1463 if (df->uses[no])
1465 if (no < last_use_id
1466 && web_parts[no + df->def_id].ref != df->uses[no])
1467 abort ();
1468 web_parts[no + df->def_id].ref = df->uses[no];
1469 if (!web_parts[no + df->def_id].uplink)
1470 num_webs++;
1472 else
1473 web_parts[no + df->def_id].ref = NULL;
1476 /* We want to have only one web for each precolored register. */
1477 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
1479 struct web_part *r1 = NULL;
1480 struct df_link *link;
1481 /* There once was a test whether there is any DEF at all, and only then
1482 were all the parts merged. This was incorrect; we really want to have
1483 only one web-part for hardregs, even if there is no explicit DEF. */
1484 /* Link together all defs... */
1485 for (link = df->regs[regno].defs; link; link = link->next)
1486 if (link->ref)
1488 struct web_part *r2 = &web_parts[DF_REF_ID (link->ref)];
1489 if (!r1)
1490 r1 = r2;
1491 else
1492 r1 = union_web_parts (r1, r2);
1494 /* ... and all uses. */
1495 for (link = df->regs[regno].uses; link; link = link->next)
1496 if (link->ref)
1498 struct web_part *r2 = &web_parts[df->def_id
1499 + DF_REF_ID (link->ref)];
1500 if (!r1)
1501 r1 = r2;
1502 else
1503 r1 = union_web_parts (r1, r2);
1508 /* In case we want to remember the conflict list of a WEB, before adding
1509 new conflicts, we copy it here to orig_conflict_list. */
1511 static void
1512 copy_conflict_list (web)
1513 struct web *web;
1515 struct conflict_link *cl;
1516 if (web->orig_conflict_list || web->have_orig_conflicts)
1517 abort ();
1518 web->have_orig_conflicts = 1;
1519 for (cl = web->conflict_list; cl; cl = cl->next)
1521 struct conflict_link *ncl;
1522 ncl = (struct conflict_link *) ra_alloc (sizeof *ncl);
1523 ncl->t = cl->t;
1524 ncl->sub = NULL;
1525 ncl->next = web->orig_conflict_list;
1526 web->orig_conflict_list = ncl;
1527 if (cl->sub)
1529 struct sub_conflict *sl, *nsl;
1530 for (sl = cl->sub; sl; sl = sl->next)
1532 nsl = (struct sub_conflict *) ra_alloc (sizeof *nsl);
1533 nsl->s = sl->s;
1534 nsl->t = sl->t;
1535 nsl->next = ncl->sub;
1536 ncl->sub = nsl;
1542 /* Possibly add an edge from web FROM to TO marking a conflict between
1543 those two. This is one half of marking a complete conflict, which notes
1544 in FROM, that TO is a conflict. Adding TO to FROM's conflicts might
1545 make other conflicts superfluous, because the current TO overlaps some web
1546 already being in conflict with FROM. In this case the smaller webs are
1547 deleted from the conflict list. Likewise if TO is overlapped by a web
1548 already in the list, it isn't added at all. Note, that this can only
1549 happen, if SUBREG webs are involved. */
1551 static void
1552 add_conflict_edge (from, to)
1553 struct web *from, *to;
1555 if (from->type != PRECOLORED)
1557 struct web *pfrom = find_web_for_subweb (from);
1558 struct web *pto = find_web_for_subweb (to);
1559 struct sub_conflict *sl;
1560 struct conflict_link *cl = pfrom->conflict_list;
1561 int may_delete = 1;
1563 /* This can happen when subwebs of one web conflict with each
1564 other. In live_out_1() we created such conflicts between yet
1565 undefined webparts and defs of parts which didn't overlap with the
1566 undefined bits. Then later they nevertheless could have merged into
1567 one web, and then we land here. */
1568 if (pfrom == pto)
1569 return;
1570 if (remember_conflicts && !pfrom->have_orig_conflicts)
1571 copy_conflict_list (pfrom);
1572 if (!TEST_BIT (sup_igraph, (pfrom->id * num_webs + pto->id)))
1574 cl = (struct conflict_link *) ra_alloc (sizeof (*cl));
1575 cl->t = pto;
1576 cl->sub = NULL;
1577 cl->next = pfrom->conflict_list;
1578 pfrom->conflict_list = cl;
1579 if (pto->type != SELECT && pto->type != COALESCED)
1580 pfrom->num_conflicts += 1 + pto->add_hardregs;
1581 SET_BIT (sup_igraph, (pfrom->id * num_webs + pto->id));
1582 may_delete = 0;
1584 else
1585 /* We don't need to test for cl==NULL, because at this point
1586 a cl with cl->t==pto is guaranteed to exist. */
1587 while (cl->t != pto)
1588 cl = cl->next;
1589 if (pfrom != from || pto != to)
1591 /* This is a subconflict which should be added.
1592 If we inserted cl in this invocation, we really need to add this
1593 subconflict. If we did _not_ add it here, we only add the
1594 subconflict, if cl already had subconflicts, because otherwise
1595 this indicated, that the whole webs already conflict, which
1596 means we are not interested in this subconflict. */
1597 if (!may_delete || cl->sub != NULL)
1599 sl = (struct sub_conflict *) ra_alloc (sizeof (*sl));
1600 sl->s = from;
1601 sl->t = to;
1602 sl->next = cl->sub;
1603 cl->sub = sl;
1606 else
1607 /* pfrom == from && pto == to means, that we are not interested
1608 anymore in the subconflict list for this pair, because anyway
1609 the whole webs conflict. */
1610 cl->sub = NULL;
1614 /* Record a conflict between two webs, if we haven't recorded it
1615 already. */
1617 void
1618 record_conflict (web1, web2)
1619 struct web *web1, *web2;
1621 unsigned int id1 = web1->id, id2 = web2->id;
1622 unsigned int index = igraph_index (id1, id2);
1623 /* Trivial non-conflict or already recorded conflict. */
1624 if (web1 == web2 || TEST_BIT (igraph, index))
1625 return;
1626 if (id1 == id2)
1627 abort ();
1628 /* As fixed_regs are no targets for allocation, conflicts with them
1629 are pointless. */
1630 if ((web1->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web1->regno])
1631 || (web2->regno < FIRST_PSEUDO_REGISTER && fixed_regs[web2->regno]))
1632 return;
1633 /* Conflicts with hardregs, which are not even a candidate
1634 for this pseudo are also pointless. */
1635 if ((web1->type == PRECOLORED
1636 && ! TEST_HARD_REG_BIT (web2->usable_regs, web1->regno))
1637 || (web2->type == PRECOLORED
1638 && ! TEST_HARD_REG_BIT (web1->usable_regs, web2->regno)))
1639 return;
1640 /* Similar if the sets of possible hardregs don't intersect. In this iteration
1641 those conflicts are useless (and would make num_conflicts wrong, because
1642 num_freedom is calculated from the set of possible hardregs).
1643 But in presence of spilling and incremental building of the graph we
1644 need to note all uses of webs conflicting with the spilled ones.
1645 Because the set of possible hardregs can change in the next round for
1646 spilled webs, we possibly have then conflicts with webs which would
1647 be excluded now (because then hardregs intersect). But we actually
1648 need to check those uses, and to get hold of them, we need to remember
1649 also webs conflicting with this one, although not conflicting in this
1650 round because of non-intersecting hardregs. */
1651 if (web1->type != PRECOLORED && web2->type != PRECOLORED
1652 && ! hard_regs_intersect_p (&web1->usable_regs, &web2->usable_regs))
1654 struct web *p1 = find_web_for_subweb (web1);
1655 struct web *p2 = find_web_for_subweb (web2);
1656 /* We expect these to be rare enough to justify bitmaps. And because
1657 we have only a special use for it, we note only the superwebs. */
1658 bitmap_set_bit (p1->useless_conflicts, p2->id);
1659 bitmap_set_bit (p2->useless_conflicts, p1->id);
1660 return;
1662 SET_BIT (igraph, index);
1663 add_conflict_edge (web1, web2);
1664 add_conflict_edge (web2, web1);
1667 /* For each web W this produces the missing subwebs Wx, such that it's
1668 possible to exactly specify (W-Wy) for all already existing subwebs Wy. */
1670 static void
1671 build_inverse_webs (web)
1672 struct web *web;
1674 struct web *sweb = web->subreg_next;
1675 unsigned HOST_WIDE_INT undef;
1677 undef = rtx_to_undefined (web->orig_x);
1678 for (; sweb; sweb = sweb->subreg_next)
1679 /* Only create inverses of non-artificial webs. */
1680 if (!sweb->artificial)
1682 unsigned HOST_WIDE_INT bits;
1683 bits = undef & ~ rtx_to_undefined (sweb->orig_x);
1684 while (bits)
1686 unsigned int size_word = undef_to_size_word (web->orig_x, &bits);
1687 if (!find_subweb_2 (web, size_word))
1688 add_subweb_2 (web, size_word);
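/* A rough worked example of the above (illustrative only; the concrete
   bit values depend on rtx_to_undefined() and undef_to_size_word()):
   for a DImode web W whose only subweb W1 is (subreg:SI W 0), the low
   word, we might have undef == 0xff for W and 0x0f for W1.  Then
   bits == 0xff & ~0x0f == 0xf0, and the loop adds an artificial subweb
   covering the high word, so that the conflict machinery can refer to
   exactly (W - W1).  */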
1693 /* Copies the content of WEB to a new one, and links it into WL.
1694 Used for consistency checking. */
1696 static void
1697 copy_web (web, wl)
1698 struct web *web;
1699 struct web_link **wl;
1701 struct web *cweb = (struct web *) xmalloc (sizeof *cweb);
1702 struct web_link *link = (struct web_link *) ra_alloc (sizeof *link);
1703 link->next = *wl;
1704 *wl = link;
1705 link->web = cweb;
1706 *cweb = *web;
1709 /* Given a list of webs LINK, compare the content of the webs therein
1710 with the global webs of the same ID. For consistency checking. */
1712 static void
1713 compare_and_free_webs (link)
1714 struct web_link **link;
1716 struct web_link *wl;
1717 for (wl = *link; wl; wl = wl->next)
1719 struct web *web1 = wl->web;
1720 struct web *web2 = ID2WEB (web1->id);
1721 if (web1->regno != web2->regno
1722 || web1->mode_changed != web2->mode_changed
1723 || !rtx_equal_p (web1->orig_x, web2->orig_x)
1724 || web1->type != web2->type
1725 /* Only compare num_defs/num_uses with non-hardreg webs.
1726 E.g. the number of uses of the framepointer changes due to
1727 inserting spill code. */
1728 || (web1->type != PRECOLORED
1729 && (web1->num_uses != web2->num_uses
1730 || web1->num_defs != web2->num_defs))
1731 /* Similarly, if the framepointer was unreferenced originally
1732 but we added spills, these fields may not match. */
1733 || (web1->type != PRECOLORED
1734 && web1->crosses_call != web2->crosses_call)
1735 || (web1->type != PRECOLORED
1736 && web1->live_over_abnormal != web2->live_over_abnormal))
1737 abort ();
1738 if (web1->type != PRECOLORED)
1740 unsigned int i;
1741 for (i = 0; i < web1->num_defs; i++)
1742 if (web1->defs[i] != web2->defs[i])
1743 abort ();
1744 for (i = 0; i < web1->num_uses; i++)
1745 if (web1->uses[i] != web2->uses[i])
1746 abort ();
1748 if (web1->type == PRECOLORED)
1750 if (web1->defs)
1751 free (web1->defs);
1752 if (web1->uses)
1753 free (web1->uses);
1755 free (web1);
1757 *link = NULL;
1760 /* Setup and fill uses[] and defs[] arrays of the webs. */
1762 static void
1763 init_webs_defs_uses ()
1765 struct dlist *d;
1766 for (d = WEBS(INITIAL); d; d = d->next)
1768 struct web *web = DLIST_WEB (d);
1769 unsigned int def_i, use_i;
1770 struct df_link *link;
1771 if (web->old_web)
1772 continue;
1773 if (web->type == PRECOLORED)
1775 web->num_defs = web->num_uses = 0;
1776 continue;
1778 if (web->num_defs)
1779 web->defs = (struct ref **) xmalloc (web->num_defs *
1780 sizeof (web->defs[0]));
1781 if (web->num_uses)
1782 web->uses = (struct ref **) xmalloc (web->num_uses *
1783 sizeof (web->uses[0]));
1784 def_i = use_i = 0;
1785 for (link = web->temp_refs; link; link = link->next)
1787 if (DF_REF_REG_DEF_P (link->ref))
1788 web->defs[def_i++] = link->ref;
1789 else
1790 web->uses[use_i++] = link->ref;
1792 web->temp_refs = NULL;
1793 if (def_i != web->num_defs || use_i != web->num_uses)
1794 abort ();
1798 /* Called by parts_to_webs(). This creates (or recreates) the webs (and
1799 subwebs) from web parts, gives them IDs (only to super webs), and sets
1800 up use2web and def2web arrays. */
1802 static unsigned int
1803 parts_to_webs_1 (df, copy_webs, all_refs)
1804 struct df *df;
1805 struct web_link **copy_webs;
1806 struct df_link *all_refs;
1808 unsigned int i;
1809 unsigned int webnum;
1810 unsigned int def_id = df->def_id;
1811 unsigned int use_id = df->use_id;
1812 struct web_part *wp_first_use = &web_parts[def_id];
1814 /* For each root web part: create and initialize a new web,
1815 setup def2web[] and use2web[] for all defs and uses, and
1816 id2web for all new webs. */
1818 webnum = 0;
1819 for (i = 0; i < def_id + use_id; i++)
1821 struct web *subweb, *web = 0; /* Initialize web to silence warnings. */
1822 struct web_part *wp = &web_parts[i];
1823 struct ref *ref = wp->ref;
1824 unsigned int ref_id;
1825 rtx reg;
1826 if (!ref)
1827 continue;
1828 ref_id = i;
1829 if (i >= def_id)
1830 ref_id -= def_id;
1831 all_refs[i].ref = ref;
1832 reg = DF_REF_REG (ref);
1833 if (! wp->uplink)
1835 /* If we have a web part root, create a new web. */
1836 unsigned int newid = ~(unsigned)0;
1837 unsigned int old_web = 0;
1839 /* In the first pass, there are no old webs, so unconditionally
1840 allocate a new one. */
1841 if (ra_pass == 1)
1843 web = (struct web *) xmalloc (sizeof (struct web));
1844 newid = last_num_webs++;
1845 init_one_web (web, GET_CODE (reg) == SUBREG
1846 ? SUBREG_REG (reg) : reg);
1848 /* Otherwise, we look for an old web. */
1849 else
1851 /* Remember that use2web == def2web + def_id.
1852 Hence def2web[i] == use2web[i - def_id] for i >= def_id.
1853 So we only need to look into the def2web[] array.
1854 Try to find the web which formerly belonged to this
1855 def (or use). */
1856 web = def2web[i];
1857 /* Or which belonged to this hardreg. */
1858 if (!web && DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER)
1859 web = hardreg2web[DF_REF_REGNO (ref)];
1860 if (web)
1862 /* If we found one, reuse it. */
1863 web = find_web_for_subweb (web);
1864 remove_list (web->dlink, &WEBS(INITIAL));
1865 old_web = 1;
1866 copy_web (web, copy_webs);
1868 else
1870 /* Otherwise use a new one. First from the free list. */
1871 if (WEBS(FREE))
1872 web = DLIST_WEB (pop_list (&WEBS(FREE)));
1873 else
1875 /* Else allocate a new one. */
1876 web = (struct web *) xmalloc (sizeof (struct web));
1877 newid = last_num_webs++;
1880 /* The id is zeroed in init_one_web(). */
1881 if (newid == ~(unsigned)0)
1882 newid = web->id;
1883 if (old_web)
1884 reinit_one_web (web, GET_CODE (reg) == SUBREG
1885 ? SUBREG_REG (reg) : reg);
1886 else
1887 init_one_web (web, GET_CODE (reg) == SUBREG
1888 ? SUBREG_REG (reg) : reg);
1889 web->old_web = (old_web && web->type != PRECOLORED) ? 1 : 0;
1891 web->span_deaths = wp->spanned_deaths;
1892 web->crosses_call = wp->crosses_call;
1893 web->id = newid;
1894 web->temp_refs = NULL;
1895 webnum++;
1896 if (web->regno < FIRST_PSEUDO_REGISTER && !hardreg2web[web->regno])
1897 hardreg2web[web->regno] = web;
1898 else if (web->regno < FIRST_PSEUDO_REGISTER
1899 && hardreg2web[web->regno] != web)
1900 abort ();
1903 /* If this reference already had a web assigned, we are done.
1904 This test had better be equivalent to the web being an old web.
1905 Otherwise something is screwed. (This is tested.) */
1906 if (def2web[i] != NULL)
1908 web = def2web[i];
1909 web = find_web_for_subweb (web);
1910 /* But if this ref includes a mode change, or was a use live
1911 over an abnormal call, set appropriate flags in the web. */
1912 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1913 && web->regno >= FIRST_PSEUDO_REGISTER)
1914 web->mode_changed = 1;
1915 if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
1916 && web->regno >= FIRST_PSEUDO_REGISTER)
1917 web->subreg_stripped = 1;
1918 if (i >= def_id
1919 && TEST_BIT (live_over_abnormal, ref_id))
1920 web->live_over_abnormal = 1;
1921 /* And check that it's not a newly allocated web. That would be
1922 an inconsistency. */
1923 if (!web->old_web || web->type == PRECOLORED)
1924 abort ();
1925 continue;
1927 /* In case this was not a web part root, we need to initialize WEB
1928 from the def2web/use2web entry belonging to the root. */
1929 if (wp->uplink)
1931 struct web_part *rwp = find_web_part (wp);
1932 unsigned int j = DF_REF_ID (rwp->ref);
1933 if (rwp < wp_first_use)
1934 web = def2web[j];
1935 else
1936 web = use2web[j];
1937 web = find_web_for_subweb (web);
1940 /* Remember all references for a web in a singly linked list. */
1941 all_refs[i].next = web->temp_refs;
1942 web->temp_refs = &all_refs[i];
1944 /* And test that, if def2web[i] was NULL above, we are _not_
1945 an old web. */
1946 if (web->old_web && web->type != PRECOLORED)
1947 abort ();
1949 /* Possibly create a subweb, if this ref was a subreg. */
1950 if (GET_CODE (reg) == SUBREG)
1952 subweb = find_subweb (web, reg);
1953 if (!subweb)
1955 subweb = add_subweb (web, reg);
1956 if (web->old_web)
1957 abort ();
1960 else
1961 subweb = web;
1963 /* And check whether the ref involves an invalid mode change. */
1964 if ((DF_REF_FLAGS (ref) & DF_REF_MODE_CHANGE) != 0
1965 && web->regno >= FIRST_PSEUDO_REGISTER)
1966 web->mode_changed = 1;
1967 if ((DF_REF_FLAGS (ref) & DF_REF_STRIPPED) != 0
1968 && web->regno >= FIRST_PSEUDO_REGISTER)
1969 web->subreg_stripped = 1;
1971 /* Setup def2web, or use2web, and increment num_defs or num_uses. */
1972 if (i < def_id)
1974 /* Some sanity checks. */
1975 if (ra_pass > 1)
1977 struct web *compare = def2web[i];
1978 if (i < last_def_id)
1980 if (web->old_web && compare != subweb)
1981 abort ();
1983 if (!web->old_web && compare)
1984 abort ();
1985 if (compare && compare != subweb)
1986 abort ();
1988 def2web[i] = subweb;
1989 web->num_defs++;
1991 else
1993 if (ra_pass > 1)
1995 struct web *compare = use2web[ref_id];
1996 if (ref_id < last_use_id)
1998 if (web->old_web && compare != subweb)
1999 abort ();
2001 if (!web->old_web && compare)
2002 abort ();
2003 if (compare && compare != subweb)
2004 abort ();
2006 use2web[ref_id] = subweb;
2007 web->num_uses++;
2008 if (TEST_BIT (live_over_abnormal, ref_id))
2009 web->live_over_abnormal = 1;
2013 /* We had better now have exactly as many webs as we had web part roots. */
2014 if (webnum != num_webs)
2015 abort ();
2017 return webnum;
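/* Illustrative note on the def2web/use2web layout assumed above: both
   arrays live in one allocation with use2web == def2web + def_id (see
   ra_build_realloc() below).  So with, say, def_id == 3 and use_id == 2,
   reference index i == 4 denotes the second use, ref_id == 4 - 3 == 1,
   and def2web[4] and use2web[1] name the same slot.  That is why the
   loop above can look at def2web[i] even for uses.  */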
2020 /* This builds full webs out of web parts, without relating them to each
2021 other (i.e. without creating the conflict edges). */
2023 static void
2024 parts_to_webs (df)
2025 struct df *df;
2027 unsigned int i;
2028 unsigned int webnum;
2029 struct web_link *copy_webs = NULL;
2030 struct dlist *d;
2031 struct df_link *all_refs;
2032 num_subwebs = 0;
2034 /* First build webs and ordinary subwebs. */
2035 all_refs = (struct df_link *) xcalloc (df->def_id + df->use_id,
2036 sizeof (all_refs[0]));
2037 webnum = parts_to_webs_1 (df, &copy_webs, all_refs);
2039 /* Set up the webs for hardregs which are still missing (weren't
2040 mentioned in the code). */
2041 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2042 if (!hardreg2web[i])
2044 struct web *web = (struct web *) xmalloc (sizeof (struct web));
2045 init_one_web (web, gen_rtx_REG (reg_raw_mode[i], i));
2046 web->id = last_num_webs++;
2047 hardreg2web[web->regno] = web;
2049 num_webs = last_num_webs;
2051 /* Now create all artificial subwebs, i.e. those which do
2052 not correspond to a real subreg in the current function's RTL, but
2053 which nevertheless are targets of conflicts.
2054 XXX we need to merge this loop with the one above, which means we need
2055 a way to later override the artificiality. Beware: currently
2056 add_subweb_2() relies on the existence of normal subwebs for deducing
2057 a sane mode to use for the artificial subwebs. */
2058 for (i = 0; i < df->def_id + df->use_id; i++)
2060 struct web_part *wp = &web_parts[i];
2061 struct tagged_conflict *cl;
2062 struct web *web;
2063 if (wp->uplink || !wp->ref)
2065 if (wp->sub_conflicts)
2066 abort ();
2067 continue;
2069 web = def2web[i];
2070 web = find_web_for_subweb (web);
2071 for (cl = wp->sub_conflicts; cl; cl = cl->next)
2072 if (!find_subweb_2 (web, cl->size_word))
2073 add_subweb_2 (web, cl->size_word);
2076 /* And now create artificial subwebs needed for representing the inverse
2077 of some subwebs. This also gives IDs to all subwebs. */
2078 webnum = last_num_webs;
2079 for (d = WEBS(INITIAL); d; d = d->next)
2081 struct web *web = DLIST_WEB (d);
2082 if (web->subreg_next)
2084 struct web *sweb;
2085 build_inverse_webs (web);
2086 for (sweb = web->subreg_next; sweb; sweb = sweb->subreg_next)
2087 sweb->id = webnum++;
2091 /* Now that everyone has an ID, we can set up the id2web array. */
2092 id2web = (struct web **) xcalloc (webnum, sizeof (id2web[0]));
2093 for (d = WEBS(INITIAL); d; d = d->next)
2095 struct web *web = DLIST_WEB (d);
2096 ID2WEB (web->id) = web;
2097 for (web = web->subreg_next; web; web = web->subreg_next)
2098 ID2WEB (web->id) = web;
2100 num_subwebs = webnum - last_num_webs;
2101 num_allwebs = num_webs + num_subwebs;
2102 num_webs += num_subwebs;
2104 /* Allocate and clear the conflict graph bitmaps. */
2105 igraph = sbitmap_alloc (num_webs * num_webs / 2);
2106 sup_igraph = sbitmap_alloc (num_webs * num_webs);
2107 sbitmap_zero (igraph);
2108 sbitmap_zero (sup_igraph);
2110 /* Distribute the references to their webs. */
2111 init_webs_defs_uses ();
2112 /* And do some sanity checks to verify that old webs, and those recreated
2113 from the new web parts, really are the same. */
2114 compare_and_free_webs (&copy_webs);
2115 free (all_refs);
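/* Recap of the ID numbering established above (descriptive only):
   super webs built from web part roots get their IDs first, then webs
   for hardregs not mentioned in the code, and finally all subwebs
   (real and artificial).  num_webs afterwards counts all of these,
   which is also the basis for the two conflict bitmaps: igraph has one
   bit per unordered pair (num_webs * num_webs / 2), sup_igraph one bit
   per ordered pair (num_webs * num_webs).  */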
2118 /* This deletes all conflicts to and from webs which need to be renewed
2119 in this pass of the allocator, i.e. those which were spilled in the
2120 last pass. Furthermore it also rebuilds the bitmaps for the remaining
2121 conflicts. */
2123 static void
2124 reset_conflicts ()
2126 unsigned int i;
2127 bitmap newwebs = BITMAP_XMALLOC ();
2128 for (i = 0; i < num_webs - num_subwebs; i++)
2130 struct web *web = ID2WEB (i);
2131 /* Hardreg webs and non-old webs are new webs (which
2132 need rebuilding). */
2133 if (web->type == PRECOLORED || !web->old_web)
2134 bitmap_set_bit (newwebs, web->id);
2137 for (i = 0; i < num_webs - num_subwebs; i++)
2139 struct web *web = ID2WEB (i);
2140 struct conflict_link *cl;
2141 struct conflict_link **pcl;
2142 pcl = &(web->conflict_list);
2144 /* First restore the conflict list to be like it was before
2145 coalescing. */
2146 if (web->have_orig_conflicts)
2148 web->conflict_list = web->orig_conflict_list;
2149 web->orig_conflict_list = NULL;
2151 if (web->orig_conflict_list)
2152 abort ();
2154 /* New non-precolored webs have no conflict list. */
2155 if (web->type != PRECOLORED && !web->old_web)
2157 *pcl = NULL;
2158 /* Useless conflicts will be rebuilt completely. But check
2159 for cleanliness, as the web might have come from the
2160 free list. */
2161 if (bitmap_first_set_bit (web->useless_conflicts) >= 0)
2162 abort ();
2164 else
2166 /* Useless conflicts with new webs will be rebuilt if they
2167 are still there. */
2168 bitmap_operation (web->useless_conflicts, web->useless_conflicts,
2169 newwebs, BITMAP_AND_COMPL);
2170 /* Go through all conflicts, and retain those to old webs. */
2171 for (cl = web->conflict_list; cl; cl = cl->next)
2173 if (cl->t->old_web || cl->t->type == PRECOLORED)
2175 *pcl = cl;
2176 pcl = &(cl->next);
2178 /* Also restore the entries in the igraph bitmaps. */
2179 web->num_conflicts += 1 + cl->t->add_hardregs;
2180 SET_BIT (sup_igraph, (web->id * num_webs + cl->t->id));
2181 /* No subconflicts means the full webs conflict. */
2182 if (!cl->sub)
2183 SET_BIT (igraph, igraph_index (web->id, cl->t->id));
2184 else
2185 /* Else only the parts in cl->sub must be in the
2186 bitmap. */
2188 struct sub_conflict *sl;
2189 for (sl = cl->sub; sl; sl = sl->next)
2190 SET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2194 *pcl = NULL;
2196 web->have_orig_conflicts = 0;
2198 BITMAP_XFREE (newwebs);
2201 /* For each web check its num_conflicts member against that
2202 number, as calculated from scratch from all neighbors. */
2204 #if 0
2205 static void
2206 check_conflict_numbers ()
2208 unsigned int i;
2209 for (i = 0; i < num_webs; i++)
2211 struct web *web = ID2WEB (i);
2212 int new_conf = 0 * web->add_hardregs;
2213 struct conflict_link *cl;
2214 for (cl = web->conflict_list; cl; cl = cl->next)
2215 if (cl->t->type != SELECT && cl->t->type != COALESCED)
2216 new_conf += 1 + cl->t->add_hardregs;
2217 if (web->type != PRECOLORED && new_conf != web->num_conflicts)
2218 abort ();
2221 #endif
2223 /* Convert the conflicts between web parts to conflicts between full webs.
2225 This can't be done in parts_to_webs(), because for recording conflicts
2226 between webs we need to know their final usable_regs set, which is used
2227 to discard non-conflicts (between webs having no hard reg in common).
2228 But this is set for spill temporaries only after the webs themselves are
2229 built. Until then the usable_regs set is based on the pseudo regno used
2230 in this web, which may contain far fewer registers than later determined.
2231 This would result in us losing conflicts (due to record_conflict()
2232 thinking that a web can only be allocated to the current usable_regs,
2233 whereas later this is extended), leading to colorings where some webs
2234 which in reality conflict get the same color. */
2236 static void
2237 conflicts_between_webs (df)
2238 struct df *df;
2240 unsigned int i;
2241 #ifdef STACK_REGS
2242 struct dlist *d;
2243 #endif
2244 bitmap ignore_defs = BITMAP_XMALLOC ();
2245 unsigned int have_ignored;
2246 unsigned int *pass_cache = (unsigned int *) xcalloc (num_webs, sizeof (int));
2247 unsigned int pass = 0;
2249 if (ra_pass > 1)
2250 reset_conflicts ();
2252 /* It is possible that the conflict bitmaps still note some defs I
2253 for which web_parts[I].ref is NULL. This can happen when the conflict
2254 bitmap for this part was kept from the last iteration, but a
2255 conflicting move insn was removed. Its DEF is still in the conflict
2256 bitmap, but it doesn't exist anymore in df->defs. To avoid checking
2257 for this in the tight loop below, we instead remember their IDs in a
2258 bitmap, and loop only over IDs which are not in it. */
2259 for (i = 0; i < df->def_id; i++)
2260 if (web_parts[i].ref == NULL)
2261 bitmap_set_bit (ignore_defs, i);
2262 have_ignored = (bitmap_first_set_bit (ignore_defs) >= 0);
2264 /* Now record all conflicts between webs. Note that we only check
2265 the conflict bitmaps of all defs. Conflict bitmaps are only in
2266 webpart roots. If they are in uses, those uses are roots, which
2267 means that this is an uninitialized web, whose conflicts
2268 don't matter. Nevertheless for hardregs we also need to check uses.
2269 E.g. hardregs used for argument passing have no DEF in the RTL,
2270 but if they have uses, they indeed conflict with all DEFs they
2271 overlap. */
2272 for (i = 0; i < df->def_id + df->use_id; i++)
2274 struct tagged_conflict *cl = web_parts[i].sub_conflicts;
2275 struct web *supweb1;
2276 if (!cl
2277 || (i >= df->def_id
2278 && DF_REF_REGNO (web_parts[i].ref) >= FIRST_PSEUDO_REGISTER))
2279 continue;
2280 supweb1 = def2web[i];
2281 supweb1 = find_web_for_subweb (supweb1);
2282 for (; cl; cl = cl->next)
2283 if (cl->conflicts)
2285 int j;
2286 struct web *web1 = find_subweb_2 (supweb1, cl->size_word);
2287 if (have_ignored)
2288 bitmap_operation (cl->conflicts, cl->conflicts, ignore_defs,
2289 BITMAP_AND_COMPL);
2290 /* We reduce the number of calls to record_conflict() with this
2291 pass counter. record_conflict() itself also has some early-out
2292 optimizations, but here we can use the special properties of
2293 the loop (constant web1) to reduce that even more.
2294 We once used an sbitmap of already handled web indices,
2295 but sbitmaps are slow to clear and bitmaps are slow to
2296 set/test. The current approach needs more memory, but
2297 locality is good. */
2298 pass++;
2300 /* Note that there are only defs in the conflicts bitset. */
2301 EXECUTE_IF_SET_IN_BITMAP (
2302 cl->conflicts, 0, j,
2304 struct web *web2 = def2web[j];
2305 unsigned int id2 = web2->id;
2306 if (pass_cache[id2] != pass)
2308 pass_cache[id2] = pass;
2309 record_conflict (web1, web2);
2315 free (pass_cache);
2316 BITMAP_XFREE (ignore_defs);
2318 #ifdef STACK_REGS
2319 /* Pseudos can't go in stack regs if they are live at the beginning of
2320 a block that is reached by an abnormal edge. */
2321 for (d = WEBS(INITIAL); d; d = d->next)
2323 struct web *web = DLIST_WEB (d);
2324 int j;
2325 if (web->live_over_abnormal)
2326 for (j = FIRST_STACK_REG; j <= LAST_STACK_REG; j++)
2327 record_conflict (web, hardreg2web[j]);
2329 #endif
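/* A minimal stand-alone sketch of the pass-counter idiom used in the
   loop above (illustrative only; the names are made up).  Instead of
   clearing a "seen" bitmap before every outer iteration, we bump a
   counter and treat entries holding an older value as "not seen".  */
#if 0
static void
example_pass_cache (unsigned int nelem)
{
  unsigned int *cache = (unsigned int *) xcalloc (nelem, sizeof *cache);
  unsigned int pass = 0;
  unsigned int i, j;
  for (i = 0; i < 4; i++)
    {
      /* One increment invalidates all cache entries at once.  */
      pass++;
      for (j = 0; j < nelem; j++)
        if (cache[j] != pass)
          {
            /* First time we see element J in this pass; do the
               expensive per-element work only here.  */
            cache[j] = pass;
          }
    }
  free (cache);
}
#endif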
2332 /* Remember that a web was spilled, and change some characteristics
2333 accordingly. */
2335 static void
2336 remember_web_was_spilled (web)
2337 struct web *web;
2339 int i;
2340 unsigned int found_size = 0;
2341 int adjust;
2342 web->spill_temp = 1;
2344 /* From now on don't use reg_pref/alt_class (regno) anymore for
2345 this web, but instead usable_regs. We can't use spill_temp for
2346 this, as it might get reset later, when we are coalesced to a
2347 non-spill-temp. In that case we still want to use usable_regs. */
2348 web->use_my_regs = 1;
2350 /* We don't constrain spill temporaries in any way for now.
2351 It's sometimes wrong to have the same constraints or
2352 preferences as the original pseudo, esp. if they were very narrow.
2353 (E.g. there once was a reg wanting class AREG (only one register)
2354 without an alternative class. As long as the spill-temps for
2355 this pseudo also had the same constraints it was spilled over and over.)
2356 Ideally we want some constraints also on spill-temps: because they are
2357 not only loaded/stored, but also worked with, any constraints from insn
2358 alternatives need applying. Currently this is dealt with by reload, like
2359 many other things, but at some point we want to integrate that
2360 functionality into the allocator. */
2361 if (web->regno >= max_normal_pseudo)
2363 COPY_HARD_REG_SET (web->usable_regs,
2364 reg_class_contents[reg_preferred_class (web->regno)]);
2365 IOR_HARD_REG_SET (web->usable_regs,
2366 reg_class_contents[reg_alternate_class (web->regno)]);
2368 else
2369 COPY_HARD_REG_SET (web->usable_regs,
2370 reg_class_contents[(int) GENERAL_REGS]);
2371 AND_COMPL_HARD_REG_SET (web->usable_regs, never_use_colors);
2372 prune_hardregs_for_mode (&web->usable_regs, PSEUDO_REGNO_MODE (web->regno));
2373 #ifdef CANNOT_CHANGE_MODE_CLASS
2374 if (web->mode_changed)
2375 AND_COMPL_HARD_REG_SET (web->usable_regs, invalid_mode_change_regs);
2376 #endif
2377 web->num_freedom = hard_regs_count (web->usable_regs);
2378 if (!web->num_freedom)
2379 abort();
2380 COPY_HARD_REG_SET (web->orig_usable_regs, web->usable_regs);
2381 /* Now look for a class which is a subset of our constraints, to
2382 set up add_hardregs, and regclass for debug output. */
2383 web->regclass = NO_REGS;
2384 for (i = (int) ALL_REGS - 1; i > 0; i--)
2386 unsigned int size;
2387 HARD_REG_SET test;
2388 COPY_HARD_REG_SET (test, reg_class_contents[i]);
2389 AND_COMPL_HARD_REG_SET (test, never_use_colors);
2390 GO_IF_HARD_REG_SUBSET (test, web->usable_regs, found);
2391 continue;
2392 found:
2393 /* Measure the actual number of bits which really are overlapping
2394 the target regset, not just the reg_class_size. */
2395 size = hard_regs_count (test);
2396 if (found_size < size)
2398 web->regclass = (enum reg_class) i;
2399 found_size = size;
2403 adjust = 0 * web->add_hardregs;
2404 web->add_hardregs =
2405 CLASS_MAX_NREGS (web->regclass, PSEUDO_REGNO_MODE (web->regno)) - 1;
2406 web->num_freedom -= web->add_hardregs;
2407 if (!web->num_freedom)
2408 abort();
2409 adjust -= 0 * web->add_hardregs;
2410 web->num_conflicts -= adjust;
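/* Descriptive note on the class search above: it walks the register
   classes from the largest downwards and remembers the class whose
   allowed contents (class minus never_use_colors) are a subset of
   usable_regs and cover the most hardregs, so regclass ends up as the
   biggest class still fitting the constraints; add_hardregs is then
   the number of additional hardregs a value of this pseudo's mode
   needs in that class.  */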
2413 /* Look at each web to see whether it is used as a spill web. Or better
2414 said, whether it will be spillable in this pass. */
2416 static void
2417 detect_spill_temps ()
2419 struct dlist *d;
2420 bitmap already = BITMAP_XMALLOC ();
2422 /* Detect webs used for spill temporaries. */
2423 for (d = WEBS(INITIAL); d; d = d->next)
2425 struct web *web = DLIST_WEB (d);
2427 /* Below is only the detection of spill temporaries. We never spill
2428 precolored webs, so those can't be spill temporaries. The code above
2429 (remember_web_was_spilled) can't currently cope with hardregs
2430 anyway. */
2431 if (web->regno < FIRST_PSEUDO_REGISTER)
2432 continue;
2433 /* Uninitialized webs can't be spill-temporaries. */
2434 if (web->num_defs == 0)
2435 continue;
2437 /* A web with only defs and no uses can't be spilled. Nevertheless
2438 it must get a color, as it takes away a register from all webs
2439 live at these defs. So we make it a short web. */
2440 if (web->num_uses == 0)
2441 web->spill_temp = 3;
2442 /* A web which was spilled last time, but for which no insns were
2443 emitted (which can happen when IR spilling sometimes ignores
2444 all deaths). */
2445 else if (web->changed)
2446 web->spill_temp = 1;
2447 /* A spill temporary has one def, one or more uses, all uses
2448 are in one insn, and either the def or use insn was inserted
2449 by the allocator. */
2450 /* XXX not correct currently. There might also be spill temps
2451 involving more than one def. Usually that's an additional
2452 clobber in the using instruction. We might also constrain
2453 ourselves to that, instead of, as currently, marking all
2454 webs involving any spill insns at all. */
2455 else
2457 unsigned int i;
2458 int spill_involved = 0;
2459 for (i = 0; i < web->num_uses && !spill_involved; i++)
2460 if (DF_REF_INSN_UID (web->uses[i]) >= orig_max_uid)
2461 spill_involved = 1;
2462 for (i = 0; i < web->num_defs && !spill_involved; i++)
2463 if (DF_REF_INSN_UID (web->defs[i]) >= orig_max_uid)
2464 spill_involved = 1;
2466 if (spill_involved/* && ra_pass > 2*/)
2468 int num_deaths = web->span_deaths;
2469 /* Mark webs involving at least one spill insn as
2470 spill temps. */
2471 remember_web_was_spilled (web);
2472 /* Search for insns which define and use the web in question
2473 at the same time, i.e. look for rmw insns. If these insns
2474 are also deaths of other webs they might have been counted
2475 as such into web->span_deaths. But because of the rmw nature
2476 of this insn it is not a point where a load/reload could be
2477 placed successfully (it would still conflict with the
2478 dead web), so reduce the number of spanned deaths by those
2479 insns. Note that sometimes such deaths are _not_ counted,
2480 so negative values can result. */
2481 bitmap_zero (already);
2482 for (i = 0; i < web->num_defs; i++)
2484 rtx insn = web->defs[i]->insn;
2485 if (TEST_BIT (insns_with_deaths, INSN_UID (insn))
2486 && !bitmap_bit_p (already, INSN_UID (insn)))
2488 unsigned int j;
2489 bitmap_set_bit (already, INSN_UID (insn));
2490 /* Only decrement it once for each insn. */
2491 for (j = 0; j < web->num_uses; j++)
2492 if (web->uses[j]->insn == insn)
2494 num_deaths--;
2495 break;
2499 /* But mark them specially if they could possibly be spilled,
2500 either because they cross some deaths (other than the ones
2501 mentioned above) or calls. */
2502 if (web->crosses_call || num_deaths > 0)
2503 web->spill_temp = 1 * 2;
2505 /* A web spanning no deaths can't be spilled either. No loads
2506 would be created for it, ergo no defs. So the insns wouldn't
2507 change, and the graph would not become easier to color. Make this
2508 also a short web. Don't do this if it crosses calls, as these are
2509 also points of reloads. */
2510 else if (web->span_deaths == 0 && !web->crosses_call)
2511 web->spill_temp = 3;
2513 web->orig_spill_temp = web->spill_temp;
2515 BITMAP_XFREE (already);
2518 /* Returns nonzero if the rtx MEM refers somehow to a stack location. */
2520 static int
2521 memref_is_stack_slot (mem)
2522 rtx mem;
2524 rtx ad = XEXP (mem, 0);
2525 rtx x;
2526 if (GET_CODE (ad) != PLUS || GET_CODE (XEXP (ad, 1)) != CONST_INT)
2527 return 0;
2528 x = XEXP (ad, 0);
2529 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
2530 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
2531 || x == stack_pointer_rtx)
2532 return 1;
2533 return 0;
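/* Illustrative examples (made up): under the test above,
   (mem (plus (reg fp) (const_int -8))) counts as a stack slot, whereas
   (mem (reg pseudo)) or (mem (plus (reg pseudo) (const_int 8))) does
   not, because only frame/arg/stack pointer bases with a constant
   offset are accepted.  */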
2536 /* Returns nonzero if rtx X contains a pseudo register anywhere. */
2538 static int
2539 contains_pseudo (x)
2540 rtx x;
2542 const char *fmt;
2543 int i;
2544 if (GET_CODE (x) == SUBREG)
2545 x = SUBREG_REG (x);
2546 if (GET_CODE (x) == REG)
2548 if (REGNO (x) >= FIRST_PSEUDO_REGISTER)
2549 return 1;
2550 else
2551 return 0;
2554 fmt = GET_RTX_FORMAT (GET_CODE (x));
2555 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2556 if (fmt[i] == 'e')
2558 if (contains_pseudo (XEXP (x, i)))
2559 return 1;
2561 else if (fmt[i] == 'E')
2563 int j;
2564 for (j = 0; j < XVECLEN (x, i); j++)
2565 if (contains_pseudo (XVECEXP (x, i, j)))
2566 return 1;
2568 return 0;
2571 /* Returns nonzero if we are able to rematerialize something with
2572 value X. If it's not a general operand, we test whether we can
2573 produce a valid insn which sets a pseudo to that value without
2574 clobbering anything. */
2576 static GTY(()) rtx remat_test_insn;
2577 static int
2578 want_to_remat (x)
2579 rtx x;
2581 int num_clobbers = 0;
2582 int icode;
2584 /* If this is a valid operand, we are OK. If it's VOIDmode, we aren't. */
2585 if (general_operand (x, GET_MODE (x)))
2586 return 1;
2588 /* Otherwise, check if we can make a valid insn from it. First initialize
2589 our test insn if we haven't already. */
2590 if (remat_test_insn == 0)
2592 remat_test_insn
2593 = make_insn_raw (gen_rtx_SET (VOIDmode,
2594 gen_rtx_REG (word_mode,
2595 FIRST_PSEUDO_REGISTER * 2),
2596 const0_rtx));
2597 NEXT_INSN (remat_test_insn) = PREV_INSN (remat_test_insn) = 0;
2600 /* Now make an insn like the one we would make when rematerializing
2601 the value X and see if valid. */
2602 PUT_MODE (SET_DEST (PATTERN (remat_test_insn)), GET_MODE (x));
2603 SET_SRC (PATTERN (remat_test_insn)) = x;
2604 /* XXX For now we don't allow any clobbers to be added, not just no
2605 hardreg clobbers. */
2606 return ((icode = recog (PATTERN (remat_test_insn), remat_test_insn,
2607 &num_clobbers)) >= 0
2608 && (num_clobbers == 0
2609 /*|| ! added_clobbers_hard_reg_p (icode)*/));
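/* Illustrative examples (target dependent, so only assumptions): a
   plain (const_int 42) is normally a general_operand and passes the
   quick test above; a more exotic constant that needs a special move
   pattern is instead pushed through the (set (reg) X) test insn and is
   accepted only if recog() finds a pattern requiring no added
   clobbers.  */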
2612 /* Look at all webs to see whether they are rematerializable.
2613 They are, if all their defs are simple sets of the same value,
2614 that value is simple enough, and want_to_remat() holds for it. */
2616 static void
2617 detect_remat_webs ()
2619 struct dlist *d;
2620 for (d = WEBS(INITIAL); d; d = d->next)
2622 struct web *web = DLIST_WEB (d);
2623 unsigned int i;
2624 rtx pat = NULL_RTX;
2625 /* Hardregs and useless webs aren't spilled -> no remat necessary.
2626 Defless webs obviously also can't be rematerialized. */
2627 if (web->regno < FIRST_PSEUDO_REGISTER || !web->num_defs
2628 || !web->num_uses)
2629 continue;
2630 for (i = 0; i < web->num_defs; i++)
2632 rtx insn;
2633 rtx set = single_set (insn = DF_REF_INSN (web->defs[i]));
2634 rtx src;
2635 if (!set)
2636 break;
2637 src = SET_SRC (set);
2638 /* When only subregs of the web are set it isn't easily
2639 rematerializable. */
2640 if (!rtx_equal_p (SET_DEST (set), web->orig_x))
2641 break;
2642 /* If we already have a pattern it must be equal to the current. */
2643 if (pat && !rtx_equal_p (pat, src))
2644 break;
2645 /* Don't do the expensive checks multiple times. */
2646 if (pat)
2647 continue;
2648 /* For now we allow only constant sources. */
2649 if ((CONSTANT_P (src)
2650 /* If the whole thing is stable already, it is a source for
2651 remat, no matter how complicated (probably all needed
2652 resources for it are live everywhere, and don't take
2653 additional register resources). */
2654 /* XXX Currently we can't use patterns which contain
2655 pseudos, _even_ if they are stable. The code simply isn't
2656 prepared for that. All those operands can't be spilled (or
2657 the dependent remat webs are not remat anymore), so they
2658 would be oldwebs in the next iteration. But currently
2659 oldwebs can't have their references changed. The
2660 incremental machinery barfs on that. */
2661 || (!rtx_unstable_p (src) && !contains_pseudo (src))
2662 /* Additionally, memrefs to stack slots are also useful, when
2663 we created them ourselves. They might not have their
2664 unchanging flag set, but nevertheless they are stable across
2665 the lifetime in question. */
2666 || (GET_CODE (src) == MEM
2667 && INSN_UID (insn) >= orig_max_uid
2668 && memref_is_stack_slot (src)))
2669 /* And we must be able to construct an insn without
2670 side-effects to actually load that value into a reg. */
2671 && want_to_remat (src))
2672 pat = src;
2673 else
2674 break;
2676 if (pat && i == web->num_defs)
2677 web->pattern = pat;
2681 /* Determine the spill costs of all webs. */
2683 static void
2684 determine_web_costs ()
2686 struct dlist *d;
2687 for (d = WEBS(INITIAL); d; d = d->next)
2689 unsigned int i, num_loads;
2690 int load_cost, store_cost;
2691 unsigned HOST_WIDE_INT w;
2692 struct web *web = DLIST_WEB (d);
2693 if (web->type == PRECOLORED)
2694 continue;
2695 /* Get costs for one load/store. Note that we offset them by 1,
2696 because some patterns have a zero rtx_cost(), but we of course
2697 still need the actual load/store insns. With zero all those
2698 webs would be the same, no matter how often and where
2699 they are used. */
2700 if (web->pattern)
2702 /* This web is rematerializable. Beware, we set store_cost to
2703 zero, optimistically assuming that we indeed don't emit any
2704 stores in the spill-code addition. This might be wrong if
2705 at the point of the load not all needed resources are
2706 available, in which case we emit a stack-based load, for
2707 which we in turn need the corresponding stores. */
2708 load_cost = 1 + rtx_cost (web->pattern, 0);
2709 store_cost = 0;
2711 else
2713 load_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2714 web->regclass, 1);
2715 store_cost = 1 + MEMORY_MOVE_COST (GET_MODE (web->orig_x),
2716 web->regclass, 0);
2718 /* We create only loads at deaths, whose number is in span_deaths. */
2719 num_loads = MIN (web->span_deaths, web->num_uses);
2720 for (w = 0, i = 0; i < web->num_uses; i++)
2721 w += DF_REF_BB (web->uses[i])->frequency + 1;
2722 if (num_loads < web->num_uses)
2723 w = (w * num_loads + web->num_uses - 1) / web->num_uses;
2724 web->spill_cost = w * load_cost;
2725 if (store_cost)
2727 for (w = 0, i = 0; i < web->num_defs; i++)
2728 w += DF_REF_BB (web->defs[i])->frequency + 1;
2729 web->spill_cost += w * store_cost;
2731 web->orig_spill_cost = web->spill_cost;
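/* A rough worked example with made-up numbers: a non-remat web with
   three uses in blocks of frequency 0, 1 and 2 gives w = 1 + 2 + 3 = 6.
   With span_deaths == 2, num_loads == 2 and w is scaled (rounding up)
   to (6 * 2 + 3 - 1) / 3 == 4, so the load part is 4 * load_cost.
   A single def in a block of frequency 1 then adds 2 * store_cost.
   Rematerializable webs skip the store part, since their store_cost
   is 0.  */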
2735 /* Detect webs which are set in a conditional jump insn (possibly a
2736 decrement-and-branch type of insn), and mark them not to be
2737 spillable. The stores for them would need to be placed on edges,
2738 which destroys the CFG. (At some point we want to deal with that. XXX) */
2740 static void
2741 detect_webs_set_in_cond_jump ()
2743 basic_block bb;
2744 FOR_EACH_BB (bb)
2745 if (GET_CODE (bb->end) == JUMP_INSN)
2747 struct df_link *link;
2748 for (link = DF_INSN_DEFS (df, bb->end); link; link = link->next)
2749 if (link->ref && DF_REF_REGNO (link->ref) >= FIRST_PSEUDO_REGISTER)
2751 struct web *web = def2web[DF_REF_ID (link->ref)];
2752 web->orig_spill_temp = web->spill_temp = 3;
2757 /* Second top-level function of this file.
2758 Converts the connected web parts to full webs. This means it allocates
2759 all webs, and initializes all fields, including detecting spill
2760 temporaries. It does not distribute moves to their corresponding webs,
2761 though. */
2763 static void
2764 make_webs (df)
2765 struct df *df;
2767 /* First build all the webs themselves. They are not yet related to
2768 each other. */
2769 parts_to_webs (df);
2770 /* Now detect spill temporaries to initialize their usable_regs set. */
2771 detect_spill_temps ();
2772 detect_webs_set_in_cond_jump ();
2773 /* And finally relate them to each other, meaning to record all possible
2774 conflicts between webs (see the comment there). */
2775 conflicts_between_webs (df);
2776 detect_remat_webs ();
2777 determine_web_costs ();
2780 /* Distribute moves to the corresponding webs. */
2782 static void
2783 moves_to_webs (df)
2784 struct df *df;
2786 struct df_link *link;
2787 struct move_list *ml;
2789 /* Distribute all moves to their corresponding webs, making sure
2790 each move is in a web at most once (this happens with some strange
2791 insns). */
2792 for (ml = wl_moves; ml; ml = ml->next)
2794 struct move *m = ml->move;
2795 struct web *web;
2796 struct move_list *newml;
2797 if (!m)
2798 continue;
2799 m->type = WORKLIST;
2800 m->dlink = NULL;
2801 /* Multiple defs/uses can happen in moves involving hard-regs in
2802 a wider mode. For those df.* creates use/def references for each
2803 real hard-reg involved. For coalescing we are interested in
2804 the smallest numbered hard-reg. */
2805 for (link = DF_INSN_DEFS (df, m->insn); link; link = link->next)
2806 if (link->ref)
2808 web = def2web[DF_REF_ID (link->ref)];
2809 web = find_web_for_subweb (web);
2810 if (!m->target_web || web->regno < m->target_web->regno)
2811 m->target_web = web;
2813 for (link = DF_INSN_USES (df, m->insn); link; link = link->next)
2814 if (link->ref)
2816 web = use2web[DF_REF_ID (link->ref)];
2817 web = find_web_for_subweb (web);
2818 if (!m->source_web || web->regno < m->source_web->regno)
2819 m->source_web = web;
2821 if (m->source_web && m->target_web
2822 /* If the usable_regs don't intersect we can't coalesce the two
2823 webs anyway, as this is not a simple copy insn (it might even
2824 need an intermediate stack temp to execute this "copy" insn). */
2825 && hard_regs_intersect_p (&m->source_web->usable_regs,
2826 &m->target_web->usable_regs))
2828 if (!flag_ra_optimistic_coalescing)
2830 struct move_list *test = m->source_web->moves;
2831 for (; test && test->move != m; test = test->next);
2832 if (! test)
2834 newml = (struct move_list*)
2835 ra_alloc (sizeof (struct move_list));
2836 newml->move = m;
2837 newml->next = m->source_web->moves;
2838 m->source_web->moves = newml;
2840 test = m->target_web->moves;
2841 for (; test && test->move != m; test = test->next);
2842 if (! test)
2844 newml = (struct move_list*)
2845 ra_alloc (sizeof (struct move_list));
2846 newml->move = m;
2847 newml->next = m->target_web->moves;
2848 m->target_web->moves = newml;
2852 else
2853 /* Delete this move. */
2854 ml->move = NULL;
2858 /* Handle tricky asm insns.
2859 This is supposed to create conflicts with hardregs which aren't allowed
2860 by the constraints. It doesn't actually do that, as that might confuse
2861 and constrain the allocator too much. */
2863 static void
2864 handle_asm_insn (df, insn)
2865 struct df *df;
2866 rtx insn;
2868 const char *constraints[MAX_RECOG_OPERANDS];
2869 enum machine_mode operand_mode[MAX_RECOG_OPERANDS];
2870 int i, noperands, in_output;
2871 HARD_REG_SET clobbered, allowed, conflict;
2872 rtx pat;
2873 if (! INSN_P (insn)
2874 || (noperands = asm_noperands (PATTERN (insn))) < 0)
2875 return;
2876 pat = PATTERN (insn);
2877 CLEAR_HARD_REG_SET (clobbered);
2879 if (GET_CODE (pat) == PARALLEL)
2880 for (i = 0; i < XVECLEN (pat, 0); i++)
2882 rtx t = XVECEXP (pat, 0, i);
2883 if (GET_CODE (t) == CLOBBER && GET_CODE (XEXP (t, 0)) == REG
2884 && REGNO (XEXP (t, 0)) < FIRST_PSEUDO_REGISTER)
2885 SET_HARD_REG_BIT (clobbered, REGNO (XEXP (t, 0)));
2888 decode_asm_operands (pat, recog_data.operand, recog_data.operand_loc,
2889 constraints, operand_mode);
2890 in_output = 1;
2891 for (i = 0; i < noperands; i++)
2893 const char *p = constraints[i];
2894 int cls = (int) NO_REGS;
2895 struct df_link *link;
2896 rtx reg;
2897 struct web *web;
2898 int nothing_allowed = 1;
2899 reg = recog_data.operand[i];
2901 /* Check whether the constraints apply to a pseudo reg, and not to
2902 e.g. a mem. */
2903 while (GET_CODE (reg) == SUBREG
2904 || GET_CODE (reg) == ZERO_EXTRACT
2905 || GET_CODE (reg) == SIGN_EXTRACT
2906 || GET_CODE (reg) == STRICT_LOW_PART)
2907 reg = XEXP (reg, 0);
2908 if (GET_CODE (reg) != REG || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2909 continue;
2911 /* Search for the web corresponding to this operand. We rely on
2912 decode_asm_operands() placing the output operands
2913 before the input operands. */
2914 while (1)
2916 if (in_output)
2917 link = df->insns[INSN_UID (insn)].defs;
2918 else
2919 link = df->insns[INSN_UID (insn)].uses;
2920 while (link && link->ref && DF_REF_REAL_REG (link->ref) != reg)
2921 link = link->next;
2922 if (!link || !link->ref)
2924 if (in_output)
2925 in_output = 0;
2926 else
2927 abort ();
2929 else
2930 break;
2932 if (in_output)
2933 web = def2web[DF_REF_ID (link->ref)];
2934 else
2935 web = use2web[DF_REF_ID (link->ref)];
2936 reg = DF_REF_REG (link->ref);
2938 /* Find the constraints, noting the allowed hardregs in allowed. */
2939 CLEAR_HARD_REG_SET (allowed);
2940 while (1)
2942 char c = *p;
2944 if (c == '\0' || c == ',' || c == '#')
2946 /* End of one alternative - mark the regs in the current
2947 class, and reset the class. */
2948 p++;
2949 IOR_HARD_REG_SET (allowed, reg_class_contents[cls]);
2950 if (cls != NO_REGS)
2951 nothing_allowed = 0;
2952 cls = NO_REGS;
2953 if (c == '#')
2954 do {
2955 c = *p++;
2956 } while (c != '\0' && c != ',');
2957 if (c == '\0')
2958 break;
2959 continue;
2962 switch (c)
2964 case '=': case '+': case '*': case '%': case '?': case '!':
2965 case '0': case '1': case '2': case '3': case '4': case 'm':
2966 case '<': case '>': case 'V': case 'o': case '&': case 'E':
2967 case 'F': case 's': case 'i': case 'n': case 'X': case 'I':
2968 case 'J': case 'K': case 'L': case 'M': case 'N': case 'O':
2969 case 'P':
2970 break;
2972 case 'p':
2973 cls = (int) reg_class_subunion[cls][(int) BASE_REG_CLASS];
2974 nothing_allowed = 0;
2975 break;
2977 case 'g':
2978 case 'r':
2979 cls = (int) reg_class_subunion[cls][(int) GENERAL_REGS];
2980 nothing_allowed = 0;
2981 break;
2983 default:
2984 cls =
2985 (int) reg_class_subunion[cls][(int)
2986 REG_CLASS_FROM_CONSTRAINT (c,
2987 p)];
2989 p += CONSTRAINT_LEN (c, p);
2992 /* Now make conflicts between this web and all hardregs which
2993 are not allowed by the constraints. */
2994 if (nothing_allowed)
2996 /* If we had no real constraints nothing was explicitly
2997 allowed, so we allow the whole class (i.e. we make no
2998 additional conflicts). */
2999 CLEAR_HARD_REG_SET (conflict);
3001 else
3003 COPY_HARD_REG_SET (conflict, usable_regs
3004 [reg_preferred_class (web->regno)]);
3005 IOR_HARD_REG_SET (conflict, usable_regs
3006 [reg_alternate_class (web->regno)]);
3007 AND_COMPL_HARD_REG_SET (conflict, allowed);
3008 /* We can't yet establish these conflicts. Reload must go first
3009 (or better said, we must implement some functionality of reload).
3010 E.g. if some operands must match, and they need the same color,
3011 we don't see yet that they do not conflict (because they match).
3012 For us it looks like two normal references with different DEFs,
3013 so they conflict, and as they both need the same color, the
3014 graph becomes uncolorable. */
3015 #if 0
3016 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
3017 if (TEST_HARD_REG_BIT (conflict, c))
3018 record_conflict (web, hardreg2web[c]);
3019 #endif
3021 if (rtl_dump_file)
3023 int c;
3024 ra_debug_msg (DUMP_ASM, " ASM constrain Web %d conflicts with:", web->id);
3025 for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
3026 if (TEST_HARD_REG_BIT (conflict, c))
3027 ra_debug_msg (DUMP_ASM, " %d", c);
3028 ra_debug_msg (DUMP_ASM, "\n");
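/* Illustrative example (made up): for a pseudo operand with constraint
   string "=r", the loop above ends with `allowed' == GENERAL_REGS and
   nothing_allowed == 0, so `conflict' would contain exactly those
   hardregs of the pseudo's preferred/alternate classes that lie
   outside GENERAL_REGS.  As explained above, these conflicts are
   deliberately not recorded yet.  */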
3033 /* The real toplevel function in this file.
3034 Builds (or rebuilds) the complete interference graph with webs
3035 and conflicts. */
3037 void
3038 build_i_graph (df)
3039 struct df *df;
3041 rtx insn;
3043 init_web_parts (df);
3045 sbitmap_zero (move_handled);
3046 wl_moves = NULL;
3048 build_web_parts_and_conflicts (df);
3050 /* For read-modify-write instructions we may have created two webs.
3051 Reconnect them here. (See above.) */
3052 connect_rmw_web_parts (df);
3054 /* The webs are conceptually complete now, but still scattered around as
3055 connected web parts. Collect all information and build the webs,
3056 including all conflicts between webs (instead of between web parts). */
3057 make_webs (df);
3058 moves_to_webs (df);
3060 /* Look for additional constraints given by asms. */
3061 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3062 handle_asm_insn (df, insn);
3065 /* Allocates or reallocates most memory for the interference graph and
3066 associated structures. If it reallocates memory (meaning, this is not
3067 the first pass), this also changes some structures to reflect the
3068 additional entries in various arrays, and the higher number of
3069 defs and uses. */
3071 void
3072 ra_build_realloc (df)
3073 struct df *df;
3075 struct web_part *last_web_parts = web_parts;
3076 struct web **last_def2web = def2web;
3077 struct web **last_use2web = use2web;
3078 sbitmap last_live_over_abnormal = live_over_abnormal;
3079 unsigned int i;
3080 struct dlist *d;
3081 move_handled = sbitmap_alloc (get_max_uid () );
3082 web_parts = (struct web_part *) xcalloc (df->def_id + df->use_id,
3083 sizeof web_parts[0]);
3084 def2web = (struct web **) xcalloc (df->def_id + df->use_id,
3085 sizeof def2web[0]);
3086 use2web = &def2web[df->def_id];
3087 live_over_abnormal = sbitmap_alloc (df->use_id);
3088 sbitmap_zero (live_over_abnormal);
3090 /* First go through all old defs and uses. */
3091 for (i = 0; i < last_def_id + last_use_id; i++)
3093 /* And relocate them to the new array. This is made ugly by the
3094 fact that defs and uses are placed consecutively into one array. */
3095 struct web_part *dest = &web_parts[i < last_def_id
3096 ? i : (df->def_id + i - last_def_id)];
3097 struct web_part *up;
3098 *dest = last_web_parts[i];
3099 up = dest->uplink;
3100 dest->uplink = NULL;
3102 /* Also relocate the uplink to point into the new array. */
3103 if (up && up->ref)
3105 unsigned int id = DF_REF_ID (up->ref);
3106 if (up < &last_web_parts[last_def_id])
3108 if (df->defs[id])
3109 dest->uplink = &web_parts[DF_REF_ID (up->ref)];
3111 else if (df->uses[id])
3112 dest->uplink = &web_parts[df->def_id + DF_REF_ID (up->ref)];
3116 /* Also set up the def2web and use2web arrays from the last pass.
3117 Remember also the state of live_over_abnormal. */
3118 for (i = 0; i < last_def_id; i++)
3120 struct web *web = last_def2web[i];
3121 if (web)
3123 web = find_web_for_subweb (web);
3124 if (web->type != FREE && web->type != PRECOLORED)
3125 def2web[i] = last_def2web[i];
3128 for (i = 0; i < last_use_id; i++)
3130 struct web *web = last_use2web[i];
3131 if (web)
3133 web = find_web_for_subweb (web);
3134 if (web->type != FREE && web->type != PRECOLORED)
3135 use2web[i] = last_use2web[i];
3137 if (TEST_BIT (last_live_over_abnormal, i))
3138 SET_BIT (live_over_abnormal, i);
3141 /* We don't have any subwebs for now. At some point we might want to
3142 remember them too, instead of recreating all of them every time.
3143 The problem is that which subwebs we need also depends on what
3144 other webs and subwebs exist, and which conflicts are there.
3145 OTOH it should be no problem if we had some more subwebs than strictly
3146 needed. Later. */
3147 for (d = WEBS(FREE); d; d = d->next)
3149 struct web *web = DLIST_WEB (d);
3150 struct web *wnext;
3151 for (web = web->subreg_next; web; web = wnext)
3153 wnext = web->subreg_next;
3154 free (web);
3156 DLIST_WEB (d)->subreg_next = NULL;
3159 /* The uses we are going to recheck anyway are not marked as live over
3160 an abnormal edge here. In fact, they might actually not be anymore,
3161 due to added loads. */
3162 if (last_check_uses)
3163 sbitmap_difference (live_over_abnormal, live_over_abnormal,
3164 last_check_uses);
3166 if (last_def_id || last_use_id)
3168 sbitmap_free (last_live_over_abnormal);
3169 free (last_web_parts);
3170 free (last_def2web);
3172 if (!last_max_uid)
3174 /* Setup copy cache, for copy_insn_p (). */
3175 copy_cache = (struct copy_p_cache *)
3176 xcalloc (get_max_uid (), sizeof (copy_cache[0]));
3177 init_bb_info ();
3179 else
3181 copy_cache = (struct copy_p_cache *)
3182 xrealloc (copy_cache, get_max_uid () * sizeof (copy_cache[0]));
3183 memset (&copy_cache[last_max_uid], 0,
3184 (get_max_uid () - last_max_uid) * sizeof (copy_cache[0]));
3188 /* Free up/clear some memory, only needed for one pass. */
3190 void
3191 ra_build_free ()
3193 struct dlist *d;
3194 unsigned int i;
3196 /* Clear the moves associated with a web (we also need to look into
3197 subwebs here). */
3198 for (i = 0; i < num_webs; i++)
3200 struct web *web = ID2WEB (i);
3201 if (!web)
3202 abort ();
3203 if (i >= num_webs - num_subwebs
3204 && (web->conflict_list || web->orig_conflict_list))
3205 abort ();
3206 web->moves = NULL;
3208 /* All webs in the free list have no defs or uses anymore. */
3209 for (d = WEBS(FREE); d; d = d->next)
3211 struct web *web = DLIST_WEB (d);
3212 if (web->defs)
3213 free (web->defs);
3214 web->defs = NULL;
3215 if (web->uses)
3216 free (web->uses);
3217 web->uses = NULL;
3218 /* We can't free the subwebs here, as they are referenced from
3219 def2web[], and possibly needed in the next ra_build_realloc().
3220 We free them there (or in free_all_mem()). */
3223 /* Free all conflict bitmaps from web parts. Note that we clear
3224 _all_ these conflicts, and don't rebuild them next time for uses
3225 which aren't rechecked. This means that those conflict bitmaps
3226 only contain the incremental information. The cumulative information
3227 is still contained in the edges of the I-graph, i.e. in
3228 conflict_list (or orig_conflict_list) of the webs. */
3229 for (i = 0; i < df->def_id + df->use_id; i++)
3231 struct tagged_conflict *cl;
3232 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3234 if (cl->conflicts)
3235 BITMAP_XFREE (cl->conflicts);
3237 web_parts[i].sub_conflicts = NULL;
3240 wl_moves = NULL;
3242 free (id2web);
3243 free (move_handled);
3244 sbitmap_free (sup_igraph);
3245 sbitmap_free (igraph);
3248 /* Free all memory for the interference graph structures. */
3250 void
3251 ra_build_free_all (df)
3252 struct df *df;
3254 unsigned int i;
3256 free_bb_info ();
3257 free (copy_cache);
3258 copy_cache = NULL;
3259 for (i = 0; i < df->def_id + df->use_id; i++)
3261 struct tagged_conflict *cl;
3262 for (cl = web_parts[i].sub_conflicts; cl; cl = cl->next)
3264 if (cl->conflicts)
3265 BITMAP_XFREE (cl->conflicts);
3267 web_parts[i].sub_conflicts = NULL;
3269 sbitmap_free (live_over_abnormal);
3270 free (web_parts);
3271 web_parts = NULL;
3272 if (last_check_uses)
3273 sbitmap_free (last_check_uses);
3274 last_check_uses = NULL;
3275 free (def2web);
3276 use2web = NULL;
3277 def2web = NULL;
3280 #include "gt-ra-build.h"
3283 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4: