/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically about 5 instructions between
   branches in typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  If the bitmaps for the true dependency cache are
   allocated, then the bitmaps for the anti and output dependency caches
   are allocated as well.  */
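/* Illustrative sketch only, not part of the pass itself: with the caches
   in place, a true dependence of INSN on ELEM is recorded and later
   re-tested roughly as

	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
	...
	if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem)))
	  ... duplicate found without walking LOG_LINKS ...

   which is what add_dependence below relies on to avoid rescanning
   LOG_LINKS in very large basic blocks.  */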
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void remove_dependence PARAMS ((rtx, rtx));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));
static rtx group_leader PARAMS ((rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_DEST (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
      && XEXP (cond, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
	   && XEXP (cond, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
			   XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
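/* For illustration only (a hypothetical pair, not emitted by this file):
   the conditions (lt (reg 1) (const_int 0)) and (ge (reg 1) (const_int 0))
   compare the same operands with reversed codes, so conditions_mutex_p
   returns nonzero for them; insns predicated on the two can never both
   execute, and add_dependence below may omit a dependence between them.  */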
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link, next;
  int present_p = 1;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be changed.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      rtx cond1, cond2;

      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, elem)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn))
	return;
    }

  /* If elem is part of a sequence that must be scheduled together, then
     make the dependence point to the last insn of the sequence.
     When HAVE_cc0, it is possible for NOTEs to exist between users and
     setters of the condition codes, so we must skip past notes here.
     Otherwise, NOTEs are impossible here.  */
  next = next_nonnote_insn (elem);
  if (next && SCHED_GROUP_P (next)
      && GET_CODE (next) != CODE_LABEL)
    {
      /* Notes will never intervene here though, so don't bother checking
	 for them.  */
      /* We must reject CODE_LABELs, so that we don't get confused by one
	 that has LABEL_PRESERVE_P set, which is represented by the same
	 bit in the rtl as SCHED_GROUP_P.  A CODE_LABEL can never be
	 SCHED_GROUP_P.  */
      rtx nnext;

      while ((nnext = next_nonnote_insn (next)) != NULL
	     && SCHED_GROUP_P (nnext)
	     && GET_CODE (nnext) != CODE_LABEL)
	next = nnext;

      /* Again, don't depend an insn on itself.  */
      if (insn == next)
	return;

      /* Make the dependence to NEXT, the last insn of the group, instead
	 of the original ELEM.  */
      elem = next;
    }

#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
	abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
	/* Do nothing (present_set_type is already 0).  */
	;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem)))
	present_dep_type = REG_DEP_OUTPUT;
      else
	present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
	return;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
	{
#ifdef INSN_SCHEDULING
	  /* Clear corresponding cache entry because type of the link
	     may be changed.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
		       && output_dependency_cache)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	    }
#endif

	  /* If this is a more restrictive type of dependence than the existing
	     one, then change the existing dependence to this type.  */
	  if ((int) dep_type < (int) REG_NOTE_KIND (link))
	    PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
	  /* If we are adding a dependency to INSN's LOG_LINKs, then
	     note that in the bitmap caches of dependency information.  */
	  if (true_dependency_cache != NULL)
	    {
	      if ((int) REG_NOTE_KIND (link) == 0)
		SET_BIT (true_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		SET_BIT (output_dependency_cache[INSN_LUID (insn)],
			 INSN_LUID (elem));
	    }
#endif
	  return;
	}
  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
	SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
	SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
	SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (insn, list, dep_type)
     rtx insn, list;
     enum reg_note dep_type;
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}
/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (insn, listp, dep_type)
     rtx insn;
     rtx *listp;
     enum reg_note dep_type;
{
  rtx list, next;

  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Remove ELEM wrapped in an INSN_LIST from the LOG_LINKS
   of INSN.  Abort if not found.  */

static void
remove_dependence (insn, elem)
     rtx insn;
     rtx elem;
{
  rtx prev, link, next;
  int found = 0;

  for (prev = 0, link = LOG_LINKS (insn); link; link = next)
    {
      next = XEXP (link, 1);
      if (XEXP (link, 0) == elem)
	{
	  if (prev)
	    XEXP (prev, 1) = next;
	  else
	    LOG_LINKS (insn) = next;

#ifdef INSN_SCHEDULING
	  /* If we are removing a dependency from the LOG_LINKS list,
	     make sure to remove it from the cache too.  */
	  if (true_dependency_cache != NULL)
	    {
	      if (REG_NOTE_KIND (link) == 0)
		RESET_BIT (true_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
		RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
		RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
			   INSN_LUID (elem));
	    }
#endif

	  free_INSN_LIST_node (link);

	  found = 1;
	}
      else
	prev = link;
    }

  if (!found)
    abort ();
  return;
}
/* Return an insn which represents a SCHED_GROUP, which is
   the last insn in the group.  */

static rtx
group_leader (insn)
     rtx insn;
{
  rtx prev;

  do
    {
      prev = insn;
      insn = next_nonnote_insn (insn);
    }
  while (insn && SCHED_GROUP_P (insn) && (GET_CODE (insn) != CODE_LABEL));

  return prev;
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx link, prev;

  SCHED_GROUP_P (insn) = 1;

  /* There may be a note before this insn now, but all notes will
     be removed before we actually try to schedule the insns, so
     it won't cause a problem later.  We must avoid it here though.  */
  prev = prev_nonnote_insn (insn);

  /* Make a copy of all dependencies on the immediately previous insn,
     and add to this insn.  This is so that all the dependencies will
     apply to the group.  Remove an explicit dependence on this insn
     as SCHED_GROUP_P now represents it.  */

  if (find_insn_list (prev, LOG_LINKS (insn)))
    remove_dependence (insn, prev);

  for (link = LOG_LINKS (prev); link; link = XEXP (link, 1))
    add_dependence (insn, XEXP (link, 0), REG_NOTE_KIND (link));
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
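/* A sketch of the four kinds, using hypothetical insns that are not
   generated by this file:

	(1)  [mem A] = r1	;; write
	(2)  r2 = [mem A]	;; read:  true dependence on (1)
	(3)  r3 = [mem A]	;; read:  read dependence on (2)
	(4)  [mem A] = r4	;; write: anti dependence on (2) and (3),
				;;        output dependence on (1)

   sched_analyze_1 and sched_analyze_2 below record exactly these links
   through the pending read/write lists kept in struct deps.  */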
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (deps, insn, for_read, for_write)
     struct deps *deps;
     rtx insn;
     int for_read, for_write;
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
				    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
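/* Rough cross-reference of how this is used below: sched_analyze_1
   flushes with (false, true) when a write overflows the pending lists,
   sched_analyze_2 flushes with (true, false) for TRAP_IF,
   sched_analyze_insn flushes with (true, true) at a scheduling barrier,
   and sched_analyze flushes with (true, !CONST_OR_PURE_CALL_P (insn))
   at calls.  Afterwards INSN is the sole entry on
   last_pending_memory_flush, so later memory references depend on it.  */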
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
	{
	  int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
	  if (code == SET)
	    {
	      while (--i >= 0)
		SET_REGNO_REG_SET (reg_pending_sets, regno + i);
	    }
	  else
	    {
	      while (--i >= 0)
		SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
	    }
	}
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	 it does not reload.  Ignore these as they have served their
	 purpose already.  */
      else if (regno >= deps->max_reg)
	{
	  if (GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER)
	    abort ();
	}
      else
	{
	  if (code == SET)
	    SET_REGNO_REG_SET (reg_pending_sets, regno);
	  else
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno);

	  /* Pseudos that are REG_EQUIV to something may be replaced
	     by that during reloading.  We need only add dependencies for
	     the address in the REG_EQUIV note.  */
	  if (!reload_completed
	      && reg_known_equiv_p[regno]
	      && GET_CODE (reg_known_value[regno]) == MEM)
	    sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	  /* Don't let it cross a call after scheduling if it doesn't
	     already cross one.  */
	  if (REG_N_CALLS_CROSSED (regno) == 0)
	    add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
	}
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
	{
	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), Pmode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush,
			       REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
				   &deps->pending_write_mems, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
	int regno = REGNO (x);
	if (regno < FIRST_PSEUDO_REGISTER)
	  {
	    int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
	    while (--i >= 0)
	      SET_REGNO_REG_SET (reg_pending_uses, regno + i);
	  }
	/* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
	   it does not reload.  Ignore these as they have served their
	   purpose already.  */
	else if (regno >= deps->max_reg)
	  {
	    if (GET_CODE (PATTERN (insn)) != USE
		&& GET_CODE (PATTERN (insn)) != CLOBBER)
	      abort ();
	  }
	else
	  {
	    SET_REGNO_REG_SET (reg_pending_uses, regno);

	    /* Pseudos that are REG_EQUIV to something may be replaced
	       by that during reloading.  We need only add dependencies for
	       the address in the REG_EQUIV note.  */
	    if (!reload_completed
		&& reg_known_equiv_p[regno]
		&& GET_CODE (reg_known_value[regno]) == MEM)
	      sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

	    /* If the register does not already cross any calls, then add this
	       insn to the sched_before_next_call list so that it will still
	       not cross calls after scheduling.  */
	    if (REG_N_CALLS_CROSSED (regno) == 0)
	      deps->sched_before_next_call
		= alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  }
	return;
      }

    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (current_sched_info->use_cselib)
	  {
	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), Pmode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }
	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), t))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 t, rtx_varies_p))
	      add_dependence (insn, XEXP (pending, 0), 0);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
	      || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, &deps->pending_read_insns,
				 &deps->pending_read_mems, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  reg_pending_barrier = true;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We can not just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which, unlike its normal
	   usage, does not indicate a traditional asm.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
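      /* For example (hypothetical rtl, not built here): a (post_inc (reg 10))
	 used inside a MEM reads reg 10 for the address and then stores the
	 incremented value back, so it is handed to sched_analyze_2 for the
	 use and to sched_analyze_1 for the set.  */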
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
	 false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    sched_analyze_1 (deps, x, insn);
  else if (code == PARALLEL)
    {
      int i;

      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx sub = XVECEXP (x, 0, i);
	  code = GET_CODE (sub);

	  if (code == COND_EXEC)
	    {
	      sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
	      sub = COND_EXEC_CODE (sub);
	      code = GET_CODE (sub);
	    }
	  if (code == SET || code == CLOBBER)
	    sched_analyze_1 (deps, sub, insn);
	  else
	    sched_analyze_2 (deps, sub, insn);
	}
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	{
	  if (GET_CODE (XEXP (link, 0)) == CLOBBER)
	    sched_analyze_1 (deps, XEXP (link, 0), insn);
	  else
	    sched_analyze_2 (deps, XEXP (link, 0), insn);
	}
      if (find_reg_note (insn, REG_SETJMP, NULL))
	reg_pending_barrier = true;
    }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;

      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
	reg_pending_barrier = true;
      else
	{
	  rtx pending, pending_mem;
	  regset_head tmp;

	  INIT_REG_SET (&tmp);
	  (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
	  IOR_REG_SET (reg_pending_uses, &tmp);
	  CLEAR_REG_SET (&tmp);

	  /* All memory writes and volatile reads must happen before the
	     jump.  Non-volatile reads must happen before the jump iff
	     the result is needed by the above register used mask.  */

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush,
			       REG_DEP_ANTI);
	}
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      /* Update loop_notes with any notes from this insn.  Also determine
	 if any of the notes on the list correspond to instruction scheduling
	 barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
	{
	  if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
	      || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
	    reg_pending_barrier = true;

	  link = XEXP (link, 1);
	}
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	    });
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->uses,
					    REG_DEP_ANTI);
	      add_dependence_list_and_free (insn, &reg_last->sets, 0);
	      add_dependence_list_and_free (insn, &reg_last->clobbers, 0);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    });
	}

      for (i = 0; i < deps->max_reg; i++)
	{
	  struct deps_reg *reg_last = &deps->reg_last[i];
	  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	  SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
	}

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
	 of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	      reg_last->uses_length++;
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	      reg_last->clobbers_length++;
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	    });
	}
      else
	{
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, 0);
	      add_dependence_list (insn, reg_last->clobbers, 0);
	      reg_last->uses_length++;
	      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
	      add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
	      if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
		  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
		{
		  add_dependence_list_and_free (insn, &reg_last->sets,
						REG_DEP_OUTPUT);
		  add_dependence_list_and_free (insn, &reg_last->uses,
						REG_DEP_ANTI);
		  add_dependence_list_and_free (insn, &reg_last->clobbers,
						REG_DEP_OUTPUT);
		  reg_last->clobbers_length = 0;
		  reg_last->uses_length = 0;
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
		  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
		}
	      reg_last->clobbers_length++;
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	    });
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->sets,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->clobbers,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->uses,
					    REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	    });
	}

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }
  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
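  /* Illustrative example (hypothetical rtl): after a call, a move such as
     (set (reg:SI 150) (reg:SI 0)) copies a hard return-value register into
     a pseudo; the code below keeps such moves glued to the call by setting
     SCHED_GROUP_P and CANT_MOVE on them.  */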
  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  set_sched_group_p (insn);
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = false;
	}
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
	{
	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Clear out stale SCHED_GROUP_P.  */
	  SCHED_GROUP_P (insn) = 0;

	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
	  if (GET_CODE (insn) == JUMP_INSN)
	    {
	      /* Keep the list a reasonable size.  */
	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
		flush_pending_lists (deps, insn, true, true);
	      else
		deps->last_pending_memory_flush
		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    }
	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;
	}
      else if (GET_CODE (insn) == CALL_INSN)
	{
	  int i;

	  /* Clear out stale SCHED_GROUP_P.  */
	  SCHED_GROUP_P (insn) = 0;

	  CANT_MOVE (insn) = 1;

	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  if (find_reg_note (insn, REG_SETJMP, NULL))
	    {
	      /* This is setjmp.  Assume that all registers, not just
		 hard registers, may be clobbered by this call.  */
	      reg_pending_barrier = true;
	    }
	  else
	    {
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		/* A call may read and modify global register variables.  */
		if (global_regs[i])
		  {
		    SET_REGNO_REG_SET (reg_pending_sets, i);
		    SET_REGNO_REG_SET (reg_pending_uses, i);
		  }
		/* Other call-clobbered hard regs may be clobbered.  */
		else if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
		  SET_REGNO_REG_SET (reg_pending_clobbers, i);
		/* We don't know what set of fixed registers might be used
		   by the function, but it is certain that the stack pointer
		   is among them, so be conservative.  */
		else if (fixed_regs[i])
		  SET_REGNO_REG_SET (reg_pending_uses, i);
		/* The frame pointer is normally not used by the function
		   itself, but by the debugger.  */
		/* ??? MIPS o32 is an exception.  It uses the frame pointer
		   in the macro expansion of jal but does not represent this
		   fact in the call_insn rtl.  */
		else if (i == FRAME_POINTER_REGNUM
			 || (i == HARD_FRAME_POINTER_REGNUM
			     && (! reload_completed || frame_pointer_needed)))
		  SET_REGNO_REG_SET (reg_pending_uses, i);
	    }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  add_dependence_list_and_free (insn, &deps->sched_before_next_call,
					REG_DEP_ANTI);

	  sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
	  loop_notes = 0;

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
	  flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = true;
	}

      /* See comments on reemit_notes as to why we do this.
	 ??? Actually, the reemit_notes just say what is done, not why.  */

      else if (GET_CODE (insn) == NOTE
	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_RANGE_END))
	{
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE, NOTE_RANGE_INFO (insn),
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	}
      else if (GET_CODE (insn) == NOTE
	       && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
		   || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
	{
	  rtx rtx_region;

	  if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
	      || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
	    rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
	  else
	    rtx_region = GEN_INT (0);

	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					rtx_region,
					loop_notes);
	  loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
					GEN_INT (NOTE_LINE_NUMBER (insn)),
					loop_notes);
	  CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
	}

      if (current_sched_info->use_cselib)
	cselib_process_insn (insn);
      if (insn == tail)
	{
	  if (current_sched_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  abort ();
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
	continue;

      insn = group_leader (insn);

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	{
	  rtx x = group_leader (XEXP (link, 0));
	  rtx new_link;

	  if (x != XEXP (link, 0))
	    continue;

#ifdef ENABLE_CHECKING
	  /* If add_dependence is working properly there should never
	     be notes, deleted insns or duplicates in the backward
	     links.  Thus we need not check for them here.

	     However, if we have enabled checking we might as well go
	     ahead and verify that add_dependence worked properly.  */
	  if (GET_CODE (x) == NOTE
	      || INSN_DELETED_P (x)
	      || (forward_dependency_cache != NULL
		  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
			       INSN_LUID (insn)))
	      || (forward_dependency_cache == NULL
		  && find_insn_list (insn, INSN_DEPEND (x))))
	    abort ();
	  if (forward_dependency_cache != NULL)
	    SET_BIT (forward_dependency_cache[INSN_LUID (x)],
		     INSN_LUID (insn));
#endif

	  new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

	  dep_type = REG_NOTE_KIND (link);
	  PUT_REG_NOTE_KIND (new_link, dep_type);

	  INSN_DEPEND (x) = new_link;
	  INSN_DEP_COUNT (insn) += 1;
	}
    }
}
/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      free_INSN_LIST_list (&reg_last->uses);
      free_INSN_LIST_list (&reg_last->sets);
      free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
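  /* Worked example of the heuristic (numbers are hypothetical): with
     luid == 12000 insns in n_basic_blocks == 4 blocks the average block
     has 3000 insns, 3000 > 100 * 5, so the caches are allocated; with
     12000 insns spread over 600 blocks the average is 20 and they are
     not.  */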
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}