/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;

static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has an associated bitmap for each kind of dependency.  Each
   bitmap has enough entries to represent a dependency on any other insn
   in the insn chain.  If the bitmaps for the true-dependency cache are
   allocated, the anti- and output-dependency caches are allocated as
   well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
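
/* For illustration (this note is not part of the original sources): the
   caches above are only allocated when the average block is "very large"
   in the sense of the comment, i.e. when

       luid / n_basic_blocks > 100 * 5

   in init_dependency_caches below, where LUID is the number of insns to
   be scheduled.  So, hypothetically, 60000 insns spread over 100 basic
   blocks (600 insns per block on average) would use the bitmap caches,
   while 60000 insns in 1000 blocks (60 per block) would not.  */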
/* To speed up verifying the consistency of the forward insn dependencies
   we build, we use the following cache.  Another possible solution would
   be to switch off the duplicate-insn check for forward dependencies.  */

#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif
static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_DEST (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_SRC (pat)) != IF_THEN_ELSE)
    return 0;

  pat = SET_SRC (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (pat, 1)) == LABEL_REF
      && XEXP (pat, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (pat, 2)) == LABEL_REF
           && XEXP (pat, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
                           XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}
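
/* For illustration (hypothetical RTL, not from the original sources):
   for a conditional branch such as

       (set (pc) (if_then_else (eq (reg:SI 58) (const_int 0))
                               (label_ref 23)
                               (pc)))

   get_condition returns (eq (reg:SI 58) (const_int 0)); when the arms
   are swapped so that the branch falls through on the comparison, the
   reversed comparison (ne (reg:SI 58) (const_int 0)) is returned
   instead.  */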
/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the type
   of dependence that this link represents.  */

void
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link;
  rtx cond1, cond2;
  int present_p = 1;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be of interest.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, elem)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn))
        return;
    }

#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)],
                    INSN_LUID (elem)))
        /* Do nothing (present_dep_type is already 0).  */
        ;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear the corresponding cache entry because the type of the
             link may be changed.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                       && output_dependency_cache)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
            }
#endif

          /* If this is a more restrictive type of dependence than the existing
             one, then change the existing dependence to this type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKS, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            }
#endif
          return;
        }

  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKS, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
}
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (insn, list, dep_type)
     rtx insn, list;
     enum reg_note dep_type;
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}
/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (insn, listp, dep_type)
     rtx insn;
     rtx *listp;
     enum reg_note dep_type;
{
  rtx list, next;

  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx prev;

  SCHED_GROUP_P (insn) = 1;

  prev = prev_nonnote_insn (insn);
  add_dependence (insn, prev, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
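
/* For illustration (hypothetical insns, not from the original sources):
   given the sequence

       insn 1:  r1 = [sp+4]      ; load
       insn 2:  [sp+4] = r2      ; store
       insn 3:  [sp+4] = r3      ; store
       insn 4:  r4 = [sp+4]      ; load

   insn 2 anti-depends on insn 1 (write follows read), insn 3
   output-depends on insn 2 (write follows write), and insn 4 truly
   depends on insn 3 (read follows write).  By transitivity we need not
   also record a link from insn 4 back to insn 2.  */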
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (deps, insn, for_read, for_write)
     struct deps *deps;
     rtx insn;
     int for_read, for_write;
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          if (code == SET)
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_sets, regno + i);
            }
          else
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
            }
        }
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
        {
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)
            abort ();
        }
      else
        {
          if (code == SET)
            SET_REGNO_REG_SET (reg_pending_sets, regno);
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0)
            add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
        {
          t = shallow_copy_rtx (dest);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        }

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              SET_REGNO_REG_SET (reg_pending_uses, regno + i);
          }
        /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
           it does not reload.  Ignore these as they have served their
           purpose already.  */
        else if (regno >= deps->max_reg)
          {
            if (GET_CODE (PATTERN (insn)) != USE
                && GET_CODE (PATTERN (insn)) != CLOBBER)
              abort ();
          }
        else
          {
            SET_REGNO_REG_SET (reg_pending_uses, regno);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              deps->sched_before_next_call
                = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;
        rtx t = x;

        if (current_sched_info->use_cselib)
          {
            t = shallow_copy_rtx (t);
            cselib_lookup (XEXP (t, 0), Pmode, 1);
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
          }
        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), t))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 t, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
              || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }

    case TRAP_IF:
      /* Force pending stores to memory in case a trap handler needs them.  */
      flush_pending_lists (deps, insn, true, false);
      /* Fall through.  */

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          reg_pending_barrier = true;

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = true;
    }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        reg_pending_barrier = true;
      else
        {
          rtx pending, pending_mem;
          regset_head tmp;
          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          /* Make latency of jump equal to 0 by using anti-dependence.  */
          EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            });
          CLEAR_REG_SET (&tmp);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);
        }
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
            reg_pending_barrier = true;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
         real, so we use anti-dependence here.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_ANTI);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        }

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
              reg_last->clobbers_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (insn, &reg_last->sets,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->clobbers_length = 0;
                  reg_last->uses_length = 0;
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
                  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
                }
              reg_last->clobbers_length++;
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      set_sched_group_p (insn);
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
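
  /* For illustration (hypothetical insns, not from the original sources):
     after a call, a sequence such as

         (set (reg:SI 120) (reg:SI 0))    ; copy the hard return register
         (set (reg:SI 121) (reg:SI 1))    ; copy a second hard result reg

     keeps the post-call group open, so these copies stay glued to the
     call, while a following move that involves only pseudo registers
     closes the group.  */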
  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == 0)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = false;
        }
    }
}
/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            {
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
            }
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          int i;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          if (find_reg_note (insn, REG_SETJMP, NULL))
            {
              /* This is setjmp.  Assume that all registers, not just
                 hard registers, may be clobbered by this call.  */
              reg_pending_barrier = true;
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                /* A call may read and modify global register variables.  */
                if (global_regs[i])
                  {
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                    SET_REGNO_REG_SET (reg_pending_uses, i);
                  }
                /* Other call-clobbered hard regs may be clobbered.
                   Since we only have a choice between 'might be clobbered'
                   and 'definitely not clobbered', we must include all
                   partly call-clobbered registers here.  */
                else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                         || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                  SET_REGNO_REG_SET (reg_pending_clobbers, i);
                /* We don't know what set of fixed registers might be used
                   by the function, but it is certain that the stack pointer
                   is among them, so be conservative.  */
                else if (fixed_regs[i])
                  SET_REGNO_REG_SET (reg_pending_uses, i);
                /* The frame pointer is normally not used by the function
                   itself, but by the debugger.  */
                /* ??? MIPS o32 is an exception.  It uses the frame pointer
                   in the macro expansion of jal but does not represent this
                   fact in the call_insn rtl.  */
                else if (i == FRAME_POINTER_REGNUM
                         || (i == HARD_FRAME_POINTER_REGNUM
                             && (! reload_completed || frame_pointer_needed)))
                  SET_REGNO_REG_SET (reg_pending_uses, i);
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          add_dependence_list_and_free (insn, &deps->sched_before_next_call,
                                        REG_DEP_ANTI);

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = true;
        }

      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      if (GET_CODE (insn) == NOTE
          && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
        }

      if (current_sched_info->use_cselib)
        cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         block.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         a libcall block.  */
      if (!reload_completed
          /* Note we may have nested libcall sequences.  We only care about
             the outermost libcall sequence.  */
          && deps->libcall_block_tail_insn == 0
          /* The sequence must start with a clobber of a register.  */
          && GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
          /* The CLOBBER must also have a REG_LIBCALL note attached.  */
          && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
          && (end_seq = XEXP (link, 0)) != 0
          /* The insn referenced by the REG_LIBCALL note must be a
             simple nop copy with the same destination as the register
             mentioned in the clobber.  */
          && (set = single_set (end_seq)) != 0
          && SET_DEST (set) == r0 && SET_SRC (set) == r0
          /* And finally the insn referenced by the REG_LIBCALL must
             also contain a REG_EQUAL note and a REG_RETVAL note.  */
          && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
          && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
        deps->libcall_block_tail_insn = XEXP (link, 0);
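
      /* For illustration (hypothetical RTL, not from the original sources):
         a libcall block recognized above looks like

             (clobber (reg:DF 100))           ; REG_LIBCALL note -> last insn
             (set (reg:SI 4) ...)             ; argument setup
             (call_insn ...)                  ; the library call itself
             (set (reg:DF 100) (reg:DF 100))  ; nop move, REG_RETVAL + REG_EQUAL

         The CLOBBER's REG_LIBCALL note points at the final nop move, whose
         source and destination are the clobbered register.  */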
      /* If we have reached the end of a libcall block, then close the
         block.  */
      if (deps->libcall_block_tail_insn == insn)
        deps->libcall_block_tail_insn = 0;

      if (insn == tail)
        {
          if (current_sched_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
}
/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;
  enum reg_note dep_type;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        {
          rtx x = XEXP (link, 0);
          rtx new_link;

          if (x != XEXP (link, 0))
            continue;

#ifdef ENABLE_CHECKING
          /* If add_dependence is working properly there should never
             be notes, deleted insns or duplicates in the backward
             links.  Thus we need not check for them here.

             However, if we have enabled checking we might as well go
             ahead and verify that add_dependence worked properly.  */
          if (GET_CODE (x) == NOTE
              || INSN_DELETED_P (x)
              || (forward_dependency_cache != NULL
                  && TEST_BIT (forward_dependency_cache[INSN_LUID (x)],
                               INSN_LUID (insn)))
              || (forward_dependency_cache == NULL
                  && find_insn_list (insn, INSN_DEPEND (x))))
            abort ();
          if (forward_dependency_cache != NULL)
            SET_BIT (forward_dependency_cache[INSN_LUID (x)],
                     INSN_LUID (insn));
#endif

          new_link = alloc_INSN_LIST (insn, INSN_DEPEND (x));

          dep_type = REG_NOTE_KIND (link);
          PUT_REG_NOTE_KIND (new_link, dep_type);

          INSN_DEPEND (x) = new_link;
          INSN_DEP_COUNT (insn) += 1;
        }
    }
}
/* Initialize variables for region data dependence analysis.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
  deps->libcall_block_tail_insn = 0;
}
/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
}
/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}
/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}
/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
}
/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}