/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static int reg_pending_sets_all;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically about 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the true dependency cache is allocated, the anti
   and output dependency caches are allocated as well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
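
/* A minimal illustration (an assumption-laden sketch, not code from the
   original source) of how the caches above avoid walking LOG_LINKS: a
   duplicate true dependence from INSN to ELEM can be rejected with a
   single bit test.  The helper name is hypothetical; add_dependence
   below performs the equivalent check inline.  */

static int
cached_true_dependence_p (insn, elem)
     rtx insn, elem;
{
  /* INSN_LUID indexes the per-insn bitmap; TEST_BIT probes it.  */
  return (true_dependency_cache != NULL
          && TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
                       INSN_LUID (elem)));
}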
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn, list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Return 1 if the pair (insn, x) is found in (LIST, LIST1), or 0
   otherwise.  */

int
find_insn_mem_list (insn, x, list, list1)
     rtx insn, x, list, list1;
{
  while (list)
    {
      if (XEXP (list, 0) == insn
	  && XEXP (list1, 0) == x)
	return 1;
      list = XEXP (list, 1);
      list1 = XEXP (list1, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
    return 0;

  pat = SET_DEST (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
      && XEXP (cond, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
	   && XEXP (cond, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
			   XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */
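
/* A typical call (illustration only, not from the original source): to
   keep INSN after a prior store STORE_INSN whose result it reads, one
   writes

     add_dependence (insn, store_insn, 0);

   where dep_type 0 denotes a true data dependence, and REG_DEP_ANTI /
   REG_DEP_OUTPUT denote anti and output dependences respectively.  */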
void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p = 1;
  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be changing.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      rtx cond1, cond2;

      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, elem)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn))
	return;
    }
  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
	 for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
	 that has LABEL_PRESERVE_P set, which is represented by the same
	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
	 SCHED_GROUP_P.  */
      rtx nnext;

      while ((nnext = next_nonnote_insn (next)) != NULL
	     && SCHED_GROUP_P (nnext)
	     && GET_CODE (nnext) != CODE_LABEL)
	next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
	 of the original ELEM.  */
      elem = next;
    }
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif
  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
	abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
	/* Do nothing (present_dep_type is already 0).  */
	;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_OUTPUT;
      else
	present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
	return;
    }
#endif
  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
	{
#ifdef INSN_SCHEDULING
	  /* Clear corresponding cache entry because type of the link
	     may be changed.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
		       && output_dependency_cache)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	    }
#endif

	  /* If this is a more restrictive type of dependence than the existing
	     one, then change the existing dependence to this type.  */
	  if ((int) dep_type < (int) REG_NOTE_KIND (link))
	    PUT_REG_NOTE_KIND (link, dep_type);
#ifdef INSN_SCHEDULING
	  /* If we are adding a dependency to INSN's LOG_LINKs, then
	     note that in the bitmap caches of dependency information.  */
	  if (true_dependency_cache != NULL)
	    {
	      if ((int) REG_NOTE_KIND (link) == 0)
		SET_BIT (true_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		SET_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	    }
#endif
	  return;
	}
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
	SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
	SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
	{
	  if (prev)
	    XEXP (prev, 1) = next;
	  else
	    LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
	  /* If we are removing a dependency from the LOG_LINKS list,
	     make sure to remove it from the cache too.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == 0)
		RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	    }
#endif

	  free_INSN_LIST_node (link);
	  found = 1;
	}
      else
	prev = link;
    }

  if (!found)
    abort ();
}
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));

  return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
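
/* For example (illustration only, not from the original source):

     (1)  r1 = MEM[a]      read
     (2)  MEM[a] = r2      anti dependence on (1)
     (3)  r3 = MEM[a]      true dependence on (2)
     (4)  MEM[a] = r4      output dependence on (2), anti dependence on (3)

   The read in (3) also follows the read in (1) -- a read dependence,
   which by itself imposes no ordering.  */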
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  If ONLY_WRITE, don't flush
   the read list.  */

static void
flush_pending_lists (deps, insn, only_write)
     struct deps *deps;
     rtx insn;
     int only_write;
{
  rtx u;
  rtx link;

  while (deps->pending_read_insns && ! only_write)
    {
      add_dependence (insn, XEXP (deps->pending_read_insns, 0),
		      REG_DEP_ANTI);

      link = deps->pending_read_insns;
      deps->pending_read_insns = XEXP (deps->pending_read_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_read_mems;
      deps->pending_read_mems = XEXP (deps->pending_read_mems, 1);
      free_EXPR_LIST_node (link);
    }
  while (deps->pending_write_insns)
    {
      add_dependence (insn, XEXP (deps->pending_write_insns, 0),
		      REG_DEP_ANTI);

      link = deps->pending_write_insns;
      deps->pending_write_insns = XEXP (deps->pending_write_insns, 1);
      free_INSN_LIST_node (link);

      link = deps->pending_write_mems;
      deps->pending_write_mems = XEXP (deps->pending_write_mems, 1);
      free_EXPR_LIST_node (link);
    }
  deps->pending_lists_length = 0;

  /* last_pending_memory_flush is now a list of insns.  */
  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

  free_INSN_LIST_list (&deps->last_pending_memory_flush);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x, insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }
  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
	 If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  while (--i >= 0)
	    {
	      int r = regno + i;
	      rtx u;

	      for (u = deps->reg_last[r].uses; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	      for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

	      /* Clobbers need not be ordered with respect to one
		 another, but sets must be ordered with respect to a
		 pending clobber.  */
	      if (code == SET)
		{
		  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		    free_INSN_LIST_list (&deps->reg_last[r].uses);
		  for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
		  SET_REGNO_REG_SET (reg_pending_sets, r);
		}
	      else
		SET_REGNO_REG_SET (reg_pending_clobbers, r);

	      /* Function calls clobber all call_used regs.  */
	      if (global_regs[r]
		  || (code == SET
		      && TEST_HARD_REG_BIT (regs_invalidated_by_call, r)))
		for (u = deps->last_function_call; u; u = XEXP (u, 1))
		  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	    }
	}
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	 it does not reload.  Ignore these as they have served their
	 purpose.  */
      else if (regno >= deps->max_reg)
	{
	  if (GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    abort ();
	}
      else
	{
	  rtx u;

	  for (u = deps->reg_last[regno].uses; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	  for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);

	  if (code == SET)
	    {
	      if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		free_INSN_LIST_list (&deps->reg_last[regno].uses);
	      for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_OUTPUT);
	      SET_REGNO_REG_SET (reg_pending_sets, regno);
	    }
	  else
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */

	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    for (u = deps->last_function_call; u; u = XEXP (u, 1))
	      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
	{
	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), Pmode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, 0);
	}
      else
	{
	  rtx u;
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
				   &deps->pending_write_mems, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x, insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
	 this does not mean that this insn is using cc0.  */

      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);

      {
	rtx u;
	int regno = REGNO (x);
	if (regno < FIRST_PSEUDO_REGISTER)
	  {
	    i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	    while (--i >= 0)
	      {
		int r = regno + i;
		deps->reg_last[r].uses
		  = alloc_INSN_LIST (insn, deps->reg_last[r].uses);
		SET_REGNO_REG_SET (&deps->reg_last_in_use, r);

		for (u = deps->reg_last[r].sets; u; u = XEXP (u, 1))
		  add_dependence (insn, XEXP (u, 0), 0);

		/* ??? This should never happen.  */
		for (u = deps->reg_last[r].clobbers; u; u = XEXP (u, 1))
		  add_dependence (insn, XEXP (u, 0), 0);

		if (call_used_regs[r] || global_regs[r])
		  /* Function calls clobber all call_used regs.  */
		  for (u = deps->last_function_call; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      }
	  }
	/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	   it does not reload.  Ignore these as they have served their
	   purpose.  */
	else if (regno >= deps->max_reg)
	  {
	    if (GET_CODE (PATTERN (insn)) != USE
		&& GET_CODE (PATTERN (insn)) != CLOBBER)
	      abort ();
	  }
	else
	  {
	    deps->reg_last[regno].uses
	      = alloc_INSN_LIST (insn, deps->reg_last[regno].uses);
	    SET_REGNO_REG_SET (&deps->reg_last_in_use, regno);

	    for (u = deps->reg_last[regno].sets; u; u = XEXP (u, 1))
	      add_dependence (insn, XEXP (u, 0), 0);

	    /* ??? This should never happen.  */
	    for (u = deps->reg_last[regno].clobbers; u; u = XEXP (u, 1))
	      add_dependence (insn, XEXP (u, 0), 0);

	    /* Pseudos that are REG_EQUIV to something may be replaced
	       by that during reloading.  We need only add dependencies for
	       the address in the REG_EQUIV note.  */
	    if (!reload_completed
		&& reg_known_equiv_p[regno]
		&& GET_CODE (reg_known_value[regno]) == MEM)
	      sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	    /* If the register does not already cross any calls, then add this
	       insn to the sched_before_next_call list so that it will still
	       not cross calls after scheduling.  */
	    if (REG_N_CALLS_CROSSED (regno) == 0)
	      add_dependence (deps->sched_before_next_call, insn,
			      REG_DEP_ANTI);
	  }
      }
      /* Reading memory.  */
      {
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (current_sched_info->use_cselib)
	  {
	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), Pmode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }
	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), t))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 t, rtx_varies_p))
	      add_dependence (insn, XEXP (pending, 0), 0);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
	      || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, &deps->pending_read_insns,
				 &deps->pending_read_mems, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }

      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, 1);
    case UNSPEC_VOLATILE:

      /* Traditional and volatile asm instructions must be considered to use
	 and clobber all hard registers, all pseudo-registers and all of
	 memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	 Consider for instance a volatile asm that changes the fpu rounding
	 mode.  An insn should not be moved across this even if it only uses
	 pseudo-regs because it might give an incorrectly rounded result.  */
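
      /* As a concrete illustration (not from the original source), an
	 x86 asm that installs a new FPU control word falls in this class:

	     asm volatile ("fldcw %0" : : "m" (cw));

	 Nothing whose result depends on the rounding mode may be moved
	 across it, even if it only touches pseudo registers.  */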
      if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	{
	  for (i = 0; i < deps->max_reg; i++)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      rtx u;

	      for (u = reg_last->uses; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      for (u = reg_last->sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);
	      for (u = reg_last->clobbers; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), 0);

	      if (GET_CODE (PATTERN (insn)) != COND_EXEC)
		free_INSN_LIST_list (&reg_last->uses);
	    }
	  reg_pending_sets_all = 1;

	  flush_pending_lists (deps, insn, 0);
	}

      /* For all ASM_OPERANDS, we must traverse the vector of input operands.
	 We cannot just fall through here since then we would be confused
	 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
	 a traditional asm unlike its normal usage.  */

      if (code == ASM_OPERANDS)
	{
	  for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	    sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	}
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  int schedule_barrier_found = 0;
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);
  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	    sched_analyze_1 (deps, XEXP (link, 0), insn);
	  else
	    sched_analyze_2 (deps, XEXP (link, 0), insn);
	}
      if (find_reg_note (insn, REG_SETJMP, NULL))
	schedule_barrier_found = 1;
    }
  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;

      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
	schedule_barrier_found = 1;
      else
	{
	  rtx pending, pending_mem, u;
	  regset_head tmp;

	  INIT_REG_SET (&tmp);

	  (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
	  EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      for (u = reg_last->sets; u; u = XEXP (u, 1))
		add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	    });

	  CLEAR_REG_SET (&tmp);

	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the register use mask computed above.  */

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	}
    }
  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */

  if (loop_notes)
    {
      rtx link = loop_notes;

      /* Update loop_notes with any notes from this insn.  Also determine
	 if any of the notes on the list correspond to instruction scheduling
	 barriers (loop, eh & setjmp notes, but not range notes).  */
      while (XEXP (link, 1))
	{
	  if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
	    schedule_barrier_found = 1;

	  link = XEXP (link, 1);
	}
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }
  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    schedule_barrier_found = 1;

  /* Add dependencies if a scheduling barrier was found.  */
  if (schedule_barrier_found)
    {
      rtx u;

      for (i = 0; i < deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];

	  for (u = reg_last->uses; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
	  for (u = reg_last->sets; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), 0);
	  for (u = reg_last->clobbers; u; u = XEXP (u, 1))
	    add_dependence (insn, XEXP (u, 0), 0);

	  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
	    free_INSN_LIST_list (&reg_last->uses);
	}
      flush_pending_lists (deps, insn, 0);

      reg_pending_sets_all = 1;
    }
  /* Accumulate clobbers until the next set so that it will be output
     dependent on all of them.  At the next set we can clear the clobber
     list, since subsequent sets will be output dependent on it.  */
  if (reg_pending_sets_all)
    {
      reg_pending_sets_all = 0;
      for (i = 0; i < deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
	    {
	      free_INSN_LIST_list (&reg_last->sets);
	      free_INSN_LIST_list (&reg_last->clobbers);
	    }
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	}
    }
  else
    {
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  if (GET_CODE (PATTERN (insn)) != COND_EXEC)
	    {
	      free_INSN_LIST_list (&reg_last->sets);
	      free_INSN_LIST_list (&reg_last->clobbers);
	    }
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	});
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	});
    }
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_clobbers);
  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  set_sched_group_p (insn);
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = 0;
	}
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx u;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Clear out stale SCHED_GROUP_P.  */
	  SCHED_GROUP_P (insn) = 0;

	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
	  if (GET_CODE (insn) == JUMP_INSN)
	    {
	      /* Keep the list a reasonable size.  */
	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
		flush_pending_lists (deps, insn, 0);
	      else
		deps->last_pending_memory_flush
		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    }
	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  rtx x;
	  int i;

	  /* Clear out stale SCHED_GROUP_P.  */
	  SCHED_GROUP_P (insn) = 0;

	  CANT_MOVE (insn) = 1;

	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Any instruction using a hard register which may get clobbered
	     by a call needs to be marked as dependent on this call.
	     This prevents a use of a hard return reg from being moved
	     past a void call (i.e. it does not explicitly set the hard
	     return reg).  */

	  /* If this call has REG_SETJMP, then assume that
	     all registers, not just hard registers, may be clobbered by this
	     call.  */

	  /* Insn, being a CALL_INSN, magically depends on
	     `last_function_call' already.  */

	  if (find_reg_note (insn, REG_SETJMP, NULL))
	    {
	      for (i = 0; i < deps->max_reg; i++)
		{
		  struct deps_reg *reg_last = &deps->reg_last[i];

		  for (u = reg_last->uses; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		  for (u = reg_last->sets; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), 0);
		  for (u = reg_last->clobbers; u; u = XEXP (u, 1))
		    add_dependence (insn, XEXP (u, 0), 0);

		  free_INSN_LIST_list (&reg_last->uses);
		}
	      reg_pending_sets_all = 1;
	    }
	  else
	    {
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		if (call_used_regs[i] || global_regs[i])
		  {
		    for (u = deps->reg_last[i].uses; u; u = XEXP (u, 1))
		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
		    for (u = deps->reg_last[i].sets; u; u = XEXP (u, 1))
		      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

		    SET_REGNO_REG_SET (reg_pending_clobbers, i);
		  }
	    }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  x = LOG_LINKS (deps->sched_before_next_call);
	  while (x)
	    {
	      add_dependence (insn, XEXP (x, 0), REG_DEP_ANTI);
	      x = XEXP (x, 1);
	    }
	  free_INSN_LIST_list (&LOG_LINKS (deps->sched_before_next_call));

	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
	  flush_pending_lists (deps, insn, CONST_OR_PURE_CALL_P (insn));

	  /* Depend this function call (actually, the user of this
	     function call) on all hard register clobberage.  */

	  /* last_function_call is now a list of insns.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = 1;
	}
      /* See comments on reemit_notes as to why we do this.
	 ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
	{
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	}
      else if (GET_CODE (insn) == NOTE
	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
	{
	  rtx rtx_region;

	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
	    rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
	  else
	    rtx_region = GEN_INT (0);

	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					rtx_region, loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	  CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
	}

      if (current_sched_info->use_cselib)
	cselib_process_insn (insn);
      if (insn == tail)
	{
	  if (current_sched_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	{
	  rtx x = group_leader (XEXP (link, 0));
	  rtx new_link;

	  if (x != XEXP (link, 0))
	    continue;

#ifdef ENABLE_CHECKING
	  /* If add_dependence is working properly there should never
	     be notes, deleted insns or duplicates in the backward
	     links.  Thus we need not check for them here.

	     However, if we have enabled checking we might as well go
	     ahead and verify that add_dependence worked properly.  */
	  if (GET_CODE (x) == NOTE
	      || INSN_DELETED_P (x)
	      || (forward_dependency_cache != NULL
		  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
			       INSN_LUID (insn)))
	      || (forward_dependency_cache == NULL
		  && find_insn_list (insn, INSN_DEPEND (x))))
	    abort ();
	  if (forward_dependency_cache != NULL)
	    SET_BIT (forward_dependency_cache[INSN_LUID (x)],
		     INSN_LUID (insn));
#endif

	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

	  dep_type = REG_NOTE_KIND (link);
	  PUT_REG_NOTE_KIND (new_link, dep_type);

	  INSN_DEPEND (x) = new_link;
	  INSN_DEP_COUNT (insn) += 1;
	}
    }
}
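
/* Sketch of how a scheduler region driver uses this interface (the
   variable names here are illustrative assumptions, not code from this
   file):

     struct deps tmp_deps;

     init_deps (&tmp_deps);
     sched_analyze (&tmp_deps, head, tail);          build LOG_LINKS
     compute_forward_dependences (head, tail);       build INSN_DEPEND
     free_deps (&tmp_deps);

   sched-rgn.c and sched-ebb.c drive the analysis in essentially this
   way.  */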
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->in_post_call_group_p = 0;

  deps->sched_before_next_call
    = gen_rtx_INSN (VOIDmode, 0, NULL_RTX, NULL_RTX,
		    NULL_RTX, 0, NULL_RTX, NULL_RTX);
  LOG_LINKS (deps->sched_before_next_call) = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      free_INSN_LIST_list (&reg_last->uses);
      free_INSN_LIST_list (&reg_last->sets);
      free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
  deps->reg_last = NULL;
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
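
  /* A clarifying note (not in the original source): with the
     5-insns-per-block estimate described at the top of the file, the
     100 * 5 test below enables the caches once a basic block averages
     more than 500 insns.  */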
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_sets_all = 0;
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
}