/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GNU CC.

GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.

GNU CC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  If the bitmaps for the true dependency cache are
   allocated, then the other two caches are allocated as well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
/* To speed up checking the consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  */

int
find_insn_mem_list (insn, x, list, list1)
     rtx insn, x;
     rtx list, list1;
{
  while (list)
    {
      if (XEXP (list, 0) == insn
          && XEXP (list1, 0) == x)
        return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_DEST (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
      && XEXP (cond, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
           && XEXP (cond, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
                           XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
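
/* Illustrative example (the rtl here is hypothetical, not taken from a real
   dump): with a pseudo register 100, the conditions
   (eq (reg 100) (const_int 0)) and (ne (reg 100) (const_int 0)) satisfy the
   test above, so two COND_EXEC insns predicated on them can never both
   execute and need no dependence between their conditional effects.  */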
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */
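
/* A minimal usage sketch with hypothetical names: if PREV_WRITER is an
   earlier insn that sets the same register INSN sets, the ordering is
   recorded with

     add_dependence (insn, prev_writer, REG_DEP_OUTPUT);

   a write after a read is recorded as REG_DEP_ANTI, and a read after a
   write as a true dependence (reg note kind 0).  */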
void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  rtx cond1, cond2;
  int present_p = 1;
  enum reg_note present_dep_type;
  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2 && conditions_mutex_p (cond1, cond2))
        return;
    }

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      rtx nnext;

      /* Notes will never intervene here though, so don't bother checking
         for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
         that has LABEL_PRESERVE_P set, which is represented by the same
         bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
         scheduled.  */
      while ((nnext = next_nonnote_insn (next)) != NULL
             && SCHED_GROUP_P (nnext)
             && GET_CODE (nnext) != CODE_LABEL)
        next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
        return;

      /* Make the dependence to NEXT, the last insn of the group, instead
         of the original ELEM.  */
      elem = next;
    }
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        present_dep_type = 0;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
#endif
  /* Check that we don't already have this dependence.  */
  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
    if (XEXP (link, 0) == elem)
      {
#ifdef INSN_SCHEDULING
        /* Clear corresponding cache entry because type of the link
           may be changed.  */
        if (true_dependency_cache != NULL)
          {
            if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
              RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                     && output_dependency_cache)
              RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
          }
#endif

        /* If this is a more restrictive type of dependence than the existing
           one, then change the existing dependence to this type.  */
        if ((int) dep_type < (int) REG_NOTE_KIND (link))
          PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
        /* If we are adding a dependency to INSN's LOG_LINKs, then
           note that in the bitmap caches of dependency information.  */
        if (true_dependency_cache != NULL)
          {
            if ((int) REG_NOTE_KIND (link) == 0)
              SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem));
            else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
              SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem));
            else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
              SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem));
          }
#endif
        return;
      }
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
        {
          if (prev)
            XEXP (prev, 1) = next;
          else
            LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
          /* If we are removing a dependency from the LOG_LINKS list,
             make sure to remove it from the cache too.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == 0)
                RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          free_INSN_LIST_node (link);
          found = 1;
        }
      else
        prev = link;
    }

  if (!found)
    abort ();
}
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));

  return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
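
/* An illustrative sequence (hypothetical pseudo-rtl) touching one location:

     A: (set (mem X) (reg 1))   ;; write
     B: (set (reg 2) (mem X))   ;; read:  true dependence of B on A
     C: (set (mem X) (reg 3))   ;; write: anti dependence of C on B,
                                ;;        output dependence of C on A
     D: (set (reg 4) (mem X))   ;; read:  true dependence of D on C; the
                                ;;        link from D back to A is left
                                ;;        to transitivity through C.  */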
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */
static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  register rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read lists.  */

static void
flush_pending_lists (deps, insn, only_write)
     struct deps *deps;
     rtx insn;
     int only_write;
{
  rtx u;
  rtx link;
  while (deps->pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (deps->pending_read_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_read_insns;
      deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_read_mems;
      deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
      free_EXPR_LIST_node (link);
    }
  while (deps->pending_write_insns)
    {
      add_dependence (insn, XEXP (deps->pending_write_insns, 0),
                      REG_DEP_ANTI);

      link = deps->pending_write_insns;
      deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_write_mems;
      deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
      free_EXPR_LIST_node (link);
    }
  deps->pending_lists_length = 0;

  /* last_pending_memory_flush is now a list of insns.  */
  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

  free_INSN_LIST_list (&deps->last_pending_memory_flush);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */
static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  if (GET_CODE (dest) == PARALLEL)
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);
      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }
  if (GET_CODE (dest) == REG)
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
          i = HARD_REGNO_NREGS (regno, GET_MODE (dest));

              for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

              for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

              /* Clobbers need not be ordered with respect to one
                 another, but sets must be ordered with respect to a
                 pending clobber.  */
              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                free_INSN_LIST_list (&deps->reg_last[r].uses);
              for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
              SET_REGNO_REG_SET (reg_pending_sets, r);

              SET_REGNO_REG_SET (reg_pending_clobbers, r);
              /* Function calls clobber all call_used regs.  */
              if (global_regs[r] || (code == SET && call_used_regs[r]))
                for (u = deps->last_function_call; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)

          for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
            free_INSN_LIST_list (&deps->reg_last[regno].uses);
          for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
          SET_REGNO_REG_SET (reg_pending_sets, regno);

          SET_REGNO_REG_SET (reg_pending_clobbers, regno);
          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */

          if (REG_N_CALLS_CROSSED (regno) == 0)
            for (u = deps->last_function_call; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  else if (GET_CODE (dest) == MEM)
      /* Writing memory.  */

      if (deps->pending_lists_length > 32)
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  The number 32 was chosen because it
             seems like a reasonable number.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the number 32.  */
          flush_pending_lists (deps, insn, 0);

          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
              if (anti_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
              if (output_dependence (XEXP (pending_mem, 0), dest))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);

          for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);

      sched_analyze_2 (deps, XEXP (dest, 0), insn);

  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  register enum rtx_code code;
  register const char *fmt;
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */

      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
            i = HARD_REGNO_NREGS (regno, GET_MODE (x));

                deps->reg_last[r].uses
                  = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
                SET_REGNO_REG_SET (&deps->reg_last_in_use, r);

                for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);
                /* ??? This should never happen.  */
                for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
                  add_dependence (insn, XEXP (u, 0), 0);

                if (call_used_regs[r] || global_regs[r])
                  /* Function calls clobber all call_used regs.  */
                  for (u = deps->last_function_call; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
        /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
           it does not reload.  Ignore these as they have served their
           purpose already.  */
        else if (regno >= deps->max_reg)
            if (GET_CODE (PATTERN (insn)) != USE
                && GET_CODE (PATTERN (insn)) != CLOBBER)

            deps->reg_last[regno].uses
              = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);

            for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);

            /* ??? This should never happen.  */
            for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
              add_dependence (insn, XEXP (u, 0), 0);
            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);
            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              add_dependence (deps->sched_before_next_call, insn,
                              REG_DEP_ANTI);
      /* Reading memory.  */
        rtx pending, pending_mem;

        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
            if (read_dependence (XEXP (pending_mem, 0), x))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 x, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
              || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, 1);

    case UNSPEC_VOLATILE:
      /* Traditional and volatile asm instructions must be considered to use
         and clobber all hard registers, all pseudo-registers and all of
         memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

         Consider for instance a volatile asm that changes the fpu rounding
         mode.  An insn should not be moved across this even if it only uses
         pseudo-regs because it might give an incorrectly rounded result.  */
      if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          for (i = 0; i < deps->max_reg; i++)
              struct deps_reg *reg_last = &deps->reg_last[i];

              for (u = reg_last->uses; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = reg_last->sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);
              for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);

              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                free_INSN_LIST_list (&reg_last->uses);

          reg_pending_sets_all = 1;

          flush_pending_lists (deps, insn, 0);
      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
         We cannot just fall through here since then we would be confused
         by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
         traditional asms unlike their normal usage.  */

      if (code == ASM_OPERANDS)
          for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
            sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
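      /* For instance, a hypothetical (post_inc (reg 100)) both reads reg 100
         (its old value forms the address) and sets it, so it is passed to
         sched_analyze_2 as a read and to sched_analyze_1 as a write below.
         (Register number purely illustrative.)  */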
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  register RTX_CODE code = GET_CODE (x);

  if (code == COND_EXEC)
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
            sched_analyze_2 (deps, sub, insn);
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
          sched_analyze_1 (deps, XEXP (link, 0), insn);
        else
          sched_analyze_2 (deps, XEXP (link, 0), insn);
  if (GET_CODE (insn) == JUMP_INSN)
      rtx next, u, pending, pending_mem;
      next = next_nonnote_insn (insn);

      if (next && GET_CODE (next) == BARRIER)
          for (i = 0; i < deps->max_reg; i++)
              struct deps_reg *reg_last = &deps->reg_last[i];

              for (u = reg_last->uses; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = reg_last->sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

          INIT_REG_SET (&tmp);
          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
              struct deps_reg *reg_last = &deps->reg_last[i];
              for (u = reg_last->sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
          CLEAR_REG_SET (&tmp);

      pending = deps->pending_write_insns;
      pending_mem = deps->pending_write_mems;
          add_dependence (insn, XEXP (pending, 0), 0);

          pending = XEXP (pending, 1);
          pending_mem = XEXP (pending_mem, 1);

      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
        add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

      int schedule_barrier_found = 0;

      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      while (XEXP (link, 1))
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_SETJMP)
            schedule_barrier_found = 1;

          link = XEXP (link, 1);

      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
      /* Add dependencies if a scheduling barrier was found.  */
      if (schedule_barrier_found)
          for (i = 0; i < deps->max_reg; i++)
              struct deps_reg *reg_last = &deps->reg_last[i];

              for (u = reg_last->uses; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
              for (u = reg_last->sets; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);
              for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                add_dependence (insn, XEXP (u, 0), 0);

              if (GET_CODE (PATTERN (insn)) != COND_EXEC)
                free_INSN_LIST_list (&reg_last->uses);

          reg_pending_sets_all = 1;

          flush_pending_lists (deps, insn, 0);
  /* Accumulate clobbers until the next set so that it will be output
     dependent on all of them.  At the next set we can clear the clobber
     list, since subsequent sets will be output dependent on it.  */
  if (reg_pending_sets_all)
      reg_pending_sets_all = 0;
      for (i = 0; i < deps->max_reg; i++)
          struct deps_reg *reg_last = &deps->reg_last[i];
          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
              free_INSN_LIST_list (&reg_last->sets);
              free_INSN_LIST_list (&reg_last->clobbers);

          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
          struct deps_reg *reg_last = &deps->reg_last[i];
          if (GET_CODE (PATTERN (insn)) != COND_EXEC)
              free_INSN_LIST_list (&reg_last->sets);
              free_INSN_LIST_list (&reg_last->clobbers);

          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);

  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_clobbers);
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
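  /* For example (illustrative rtl, not from a real dump), a call returning
     a value in hard register 0 is typically followed by

       (set (reg:SI 123) (reg:SI 0))

     and keeping that copy in the post-call group glues it to the call, so
     the hard register's lifetime stays short.  */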
  if (deps->in_post_call_group_p)
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;

        end_call_group:
          deps->in_post_call_group_p = 0;
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */
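
/* A rough outline of how a caller (e.g. the region scheduler) is expected
   to drive this file; illustrative only, the real callers live elsewhere:

     struct deps tmp_deps;

     init_deps_global ();
     init_deps (&tmp_deps);
     sched_analyze (&tmp_deps, head, tail);
     compute_forward_dependences (head, tail);
     free_deps (&tmp_deps);
     finish_deps_global ();
 */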
void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  register rtx insn;
  register rtx u;
  rtx loop_notes = 0;

  for (insn = head;; insn = NEXT_INSN (insn))
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            deps->last_pending_memory_flush
              = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
      else if (GET_CODE (insn) == CALL_INSN)
          /* Clear out stale SCHED_GROUP_P.  */
          SCHED_GROUP_P (insn) = 0;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Any instruction using a hard register which may get clobbered
             by a call needs to be marked as dependent on this call.
             This prevents a use of a hard return reg from being moved
             past a void call (i.e. it does not explicitly set the hard
             return reg).  */

          /* If this call is followed by a NOTE_INSN_SETJMP, then assume that
             all registers, not just hard registers, may be clobbered by this
             call.  */

          /* Insn, being a CALL_INSN, magically depends on
             `last_function_call' already.  */

          if (NEXT_INSN (insn) && GET_CODE (NEXT_INSN (insn)) == NOTE
              && NOTE_LINE_NUMBER (NEXT_INSN (insn)) == NOTE_INSN_SETJMP)
              for (i = 0; i < deps->max_reg; i++)
                  struct deps_reg *reg_last = &deps->reg_last[i];

                  for (u = reg_last->uses; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                  for (u = reg_last->sets; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);
                  for (u = reg_last->clobbers; u; u = XEXP (u, 1))
                    add_dependence (insn, XEXP (u, 0), 0);

                  free_INSN_LIST_list (&reg_last->uses);

              reg_pending_sets_all = 1;

              /* Add a pair of REG_SAVE_NOTEs which we will later
                 convert back into a NOTE_INSN_SETJMP note.  See
                 reemit_notes for why we use a pair of NOTEs.  */
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
              REG_NOTES (insn) = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                                  GEN_INT (NOTE_INSN_SETJMP),

              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                if (call_used_regs[i] || global_regs[i])
                    for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
                    for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

                    SET_REGNO_REG_SET (reg_pending_clobbers, i);

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          x = LOG_LINKS (deps->sched_before_next_call);
            add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);

          free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, CONST_CALL_P (insn));

          /* Depend this function call (actually, the user of this
             function call) on all hard register clobberage.  */

          /* last_function_call is now a list of insns.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = 1;
      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),

      else if (GET_CODE (insn) == NOTE
               && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
                   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END
                   || (NOTE_LINE_NUMBER (insn) == NOTE_INSN_SETJMP
                       && GET_CODE (PREV_INSN (insn)) != CALL_INSN)))
          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),

          CONST_CALL_P (loop_notes) = CONST_CALL_P (insn);
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = group_leader (XEXP (link, 0));
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
}
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->in_post_call_group_p = 0;

  deps->sched_before_next_call
    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
                    NULL_RTX, 0, NULL_RTX, NULL_RTX);
  LOG_LINKS (deps->sched_before_next_call) = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      free_INSN_LIST_list (&reg_last->uses);
      free_INSN_LIST_list (&reg_last->sets);
      free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
  deps->reg_last = NULL;
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
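  /* With the guess above of about 5 insns per basic block, "very high"
     works out to 100 * 5 = 500 insns per block on average; only then are
     the quadratic (luid x luid) bitmap caches considered worth their
     memory.  */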
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_sets_all = 0;
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
}