/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "sched-int.h"
extern char *reg_known_equiv_p;
extern rtx *reg_known_value;
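
/* The regsets below collect, while a single insn is being analyzed, the
   registers that insn sets, clobbers and uses; sched_analyze_insn drains
   them into the per-register lists in the deps structure and clears them
   before the next insn is processed.  reg_pending_barrier is set when the
   insn must act as a scheduling barrier for all registers (volatile asms,
   setjmp, trapping insns and the like).  */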
static regset_head reg_pending_sets_head;
static regset_head reg_pending_clobbers_head;
static regset_head reg_pending_uses_head;

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static bool reg_pending_barrier;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically about five instructions
   between branches in typical C code.  So we can guess that the average
   basic block is approximately five instructions long; we will choose
   100 times the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in the
   insn chain.  If the bitmaps for the true-dependency cache are
   allocated, the other two caches are allocated as well.  */
static sbitmap *true_dependency_cache;
static sbitmap *anti_dependency_cache;
static sbitmap *output_dependency_cache;
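
/* Each cache is a vector of sbitmaps indexed by the INSN_LUID of the
   dependent insn; bit INSN_LUID (ELEM) is set once a dependence of the
   corresponding kind on ELEM has been recorded for that insn.  */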

/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static sbitmap *forward_dependency_cache;
#endif

static int deps_may_trap_p PARAMS ((rtx));
static void add_dependence_list PARAMS ((rtx, rtx, enum reg_note));
static void add_dependence_list_and_free PARAMS ((rtx, rtx *, enum reg_note));
static void set_sched_group_p PARAMS ((rtx));

static void flush_pending_lists PARAMS ((struct deps *, rtx, int, int));
static void sched_analyze_1 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_2 PARAMS ((struct deps *, rtx, rtx));
static void sched_analyze_insn PARAMS ((struct deps *, rtx, rtx, rtx));

static rtx get_condition PARAMS ((rtx));
static int conditions_mutex_p PARAMS ((rtx, rtx));

/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (mem)
     rtx mem;
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr)
      && REGNO (addr) >= FIRST_PSEUDO_REGISTER
      && reg_known_value[REGNO (addr)])
    addr = reg_known_value[REGNO (addr)];
  return rtx_addr_can_trap_p (addr);
}

/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (insn, list)
     rtx insn;
     rtx list;
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
        return list;
      list = XEXP (list, 1);
    }
  return 0;
}

/* Find the condition under which INSN is executed.  */

static rtx
get_condition (insn)
     rtx insn;
{
  rtx pat = PATTERN (insn);
  rtx cond;

  if (pat == 0)
    return 0;
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);
  if (GET_CODE (insn) != JUMP_INSN)
    return 0;
  if (GET_CODE (pat) != SET || SET_SRC (pat) != pc_rtx)
    return 0;
  if (GET_CODE (SET_DEST (pat)) != IF_THEN_ELSE)
    return 0;
  pat = SET_DEST (pat);
  cond = XEXP (pat, 0);
  if (GET_CODE (XEXP (cond, 1)) == LABEL_REF
      && XEXP (cond, 2) == pc_rtx)
    return cond;
  else if (GET_CODE (XEXP (cond, 2)) == LABEL_REF
           && XEXP (cond, 1) == pc_rtx)
    return gen_rtx_fmt_ee (reverse_condition (GET_CODE (cond)), GET_MODE (cond),
                           XEXP (cond, 0), XEXP (cond, 1));
  else
    return 0;
}

/* Return nonzero if conditions COND1 and COND2 can never both be true.  */

static int
conditions_mutex_p (cond1, cond2)
     rtx cond1, cond2;
{
  if (GET_RTX_CLASS (GET_CODE (cond1)) == '<'
      && GET_RTX_CLASS (GET_CODE (cond2)) == '<'
      && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2))
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}

/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if not already there.  DEP_TYPE indicates the
   type of dependence that this link represents.  The function returns
   nonzero if a new entry has been added to insn's LOG_LINK.  */

int
add_dependence (insn, elem, dep_type)
     rtx insn;
     rtx elem;
     enum reg_note dep_type;
{
  rtx link;
  int present_p;
  rtx cond1, cond2;

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    return 0;

  /* We can get a dependency on deleted insns due to optimizations in
     the register allocation and reloading or due to splitting.  Any
     such dependency is useless and can be ignored.  */
  if (GET_CODE (elem) == NOTE)
    return 0;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  /* ??? add_dependence is the wrong place to be eliding dependencies,
     as that forgets that the condition expressions themselves may
     be changed.  */
  if (GET_CODE (insn) != CALL_INSN && GET_CODE (elem) != CALL_INSN)
    {
      cond1 = get_condition (insn);
      cond2 = get_condition (elem);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, elem)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn))
        return 0;
    }

  present_p = 1;
#ifdef INSN_SCHEDULING
  /* ??? No good way to tell from here whether we're doing interblock
     scheduling.  Possibly add another callback.  */
#if 0
  /* (This code is guarded by INSN_SCHEDULING, otherwise INSN_BB is undefined.)
     No need for interblock dependences with calls, since
     calls are not moved between blocks.  Note: the edge where
     elem is a CALL is still required.  */
  if (GET_CODE (insn) == CALL_INSN
      && (INSN_BB (elem) != INSN_BB (insn)))
    return 0;
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type = 0;

      if (anti_dependency_cache == NULL || output_dependency_cache == NULL)
        abort ();
      if (TEST_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem)))
        /* Do nothing (present_dep_type is already 0).  */
        ;
      else if (TEST_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_ANTI;
      else if (TEST_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem)))
        present_dep_type = REG_DEP_OUTPUT;
      else
        present_p = 0;
      if (present_p && (int) dep_type >= (int) present_dep_type)
        return 0;
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (present_p)
    for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
      if (XEXP (link, 0) == elem)
        {
#ifdef INSN_SCHEDULING
          /* Clear corresponding cache entry because type of the link
             may be changed.  */
          if (true_dependency_cache != NULL)
            {
              if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                RESET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
                       && output_dependency_cache)
                RESET_BIT (output_dependency_cache[INSN_LUID (insn)],
                           INSN_LUID (elem));
              else
                abort ();
            }
#endif

          /* If this is a more restrictive type of dependence than the existing
             one, then change the existing dependence to this type.  */
          if ((int) dep_type < (int) REG_NOTE_KIND (link))
            PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
          /* If we are adding a dependency to INSN's LOG_LINKs, then
             note that in the bitmap caches of dependency information.  */
          if (true_dependency_cache != NULL)
            {
              if ((int) REG_NOTE_KIND (link) == 0)
                SET_BIT (true_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
                SET_BIT (anti_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
              else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
                SET_BIT (output_dependency_cache[INSN_LUID (insn)],
                         INSN_LUID (elem));
            }
#endif
          return 0;
        }

  /* Might want to check one level of transitivity to save conses.  */

  link = alloc_INSN_LIST (elem, LOG_LINKS (insn));
  LOG_LINKS (insn) = link;

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (link, dep_type);

#ifdef INSN_SCHEDULING
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if ((int) dep_type == 0)
        SET_BIT (true_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_ANTI)
        SET_BIT (anti_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
      else if (dep_type == REG_DEP_OUTPUT)
        SET_BIT (output_dependency_cache[INSN_LUID (insn)], INSN_LUID (elem));
    }
#endif
  return 1;
}

/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (insn, list, dep_type)
     rtx insn, list;
     enum reg_note dep_type;
{
  for (; list; list = XEXP (list, 1))
    add_dependence (insn, XEXP (list, 0), dep_type);
}

/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (insn, listp, dep_type)
     rtx insn;
     rtx *listp;
     enum reg_note dep_type;
{
  rtx list, next;
  for (list = *listp, *listp = NULL; list; list = next)
    {
      next = XEXP (list, 1);
      add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}

/* Set SCHED_GROUP_P and care for the rest of the bookkeeping that
   goes along with that.  */

static void
set_sched_group_p (insn)
     rtx insn;
{
  rtx prev;

  SCHED_GROUP_P (insn) = 1;

  prev = prev_nonnote_insn (insn);
  add_dependence (insn, prev, REG_DEP_ANTI);
}
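
/* Scheduling-group membership keeps an insn glued to its immediate
   predecessor (a cc0 user following its setter, or an insn inside a
   libcall or post-call group); the anti dependence on PREV keeps the
   pair ordered.  */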

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) anti dependence: write follows read
   (3) output dependence: write follows write

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
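
/* Illustrative example (pseudo-assembly, not from this file): given

       store [mem] <- r1
       load  r2 <- [mem]
       store [mem] <- r3

   the load has a true dependence on the first store, and the second
   store has an anti dependence on the load and an output dependence
   on the first store.  */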

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (deps, insn_list, mem_list, insn, mem)
     struct deps *deps;
     rtx *insn_list, *mem_list, insn, mem;
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, mem, *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}

/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (deps, insn, for_read, for_write)
     struct deps *deps;
     rtx insn;
     int for_read, for_write;
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns,
                                    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush,
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
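
/* In this file the pending lists are flushed when they grow past
   MAX_PENDING_LIST_LENGTH, at TRAP_IF, around calls, and whenever a
   scheduling barrier is recorded.  */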

/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int regno;
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (GET_CODE (x) == SET)
        sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
          || GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == SIGN_EXTRACT
          || read_modify_subreg_p (dest))
        {
          /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

          sched_analyze_2 (deps, XEXP (dest, 0), insn);
        }
      if (GET_CODE (dest) == ZERO_EXTRACT || GET_CODE (dest) == SIGN_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (GET_CODE (dest) == REG)
    {
      regno = REGNO (dest);

      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          int i = HARD_REGNO_NREGS (regno, GET_MODE (dest));
          if (code == SET)
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_sets, regno + i);
            }
          else
            {
              while (--i >= 0)
                SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
            }
        }
      /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
         it does not reload.  Ignore these as they have served their
         purpose already.  */
      else if (regno >= deps->max_reg)
        {
          if (GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER)
            abort ();
        }
      else
        {
          if (code == SET)
            SET_REGNO_REG_SET (reg_pending_sets, regno);
          else
            SET_REGNO_REG_SET (reg_pending_clobbers, regno);

          /* Pseudos that are REG_EQUIV to something may be replaced
             by that during reloading.  We need only add dependencies for
             the address in the REG_EQUIV note.  */
          if (!reload_completed
              && reg_known_equiv_p[regno]
              && GET_CODE (reg_known_value[regno]) == MEM)
            sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

          /* Don't let it cross a call after scheduling if it doesn't
             already cross one.  */
          if (REG_N_CALLS_CROSSED (regno) == 0)
            add_dependence_list (insn, deps->last_function_call, REG_DEP_ANTI);
        }
    }
  else if (GET_CODE (dest) == MEM)
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
        {
          t = shallow_copy_rtx (dest);
          cselib_lookup (XEXP (t, 0), Pmode, 1);
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
        }

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx pending, pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (anti_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (XEXP (pending_mem, 0), t))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);

          add_insn_mem_dependence (deps, &deps->pending_write_insns,
                                   &deps->pending_write_mems, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}

/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (deps, x, insn)
     struct deps *deps;
     rtx x;
     rtx insn;
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
         because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
         this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      set_sched_group_p (insn);
      return;
#endif

    case REG:
      {
        int regno = REGNO (x);
        if (regno < FIRST_PSEUDO_REGISTER)
          {
            int i = HARD_REGNO_NREGS (regno, GET_MODE (x));
            while (--i >= 0)
              SET_REGNO_REG_SET (reg_pending_uses, regno + i);
          }
        /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
           it does not reload.  Ignore these as they have served their
           purpose already.  */
        else if (regno >= deps->max_reg)
          {
            if (GET_CODE (PATTERN (insn)) != USE
                && GET_CODE (PATTERN (insn)) != CLOBBER)
              abort ();
          }
        else
          {
            SET_REGNO_REG_SET (reg_pending_uses, regno);

            /* Pseudos that are REG_EQUIV to something may be replaced
               by that during reloading.  We need only add dependencies for
               the address in the REG_EQUIV note.  */
            if (!reload_completed
                && reg_known_equiv_p[regno]
                && GET_CODE (reg_known_value[regno]) == MEM)
              sched_analyze_2 (deps, XEXP (reg_known_value[regno], 0), insn);

            /* If the register does not already cross any calls, then add this
               insn to the sched_before_next_call list so that it will still
               not cross calls after scheduling.  */
            if (REG_N_CALLS_CROSSED (regno) == 0)
              deps->sched_before_next_call
                = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          }
        return;
      }

    case MEM:
      {
        /* Reading memory.  */
        rtx u;
        rtx pending, pending_mem;
        rtx t = x;

        if (current_sched_info->use_cselib)
          {
            t = shallow_copy_rtx (t);
            cselib_lookup (XEXP (t, 0), Pmode, 1);
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
          }
        pending = deps->pending_read_insns;
        pending_mem = deps->pending_read_mems;
        while (pending)
          {
            if (read_dependence (XEXP (pending_mem, 0), t))
              add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        pending = deps->pending_write_insns;
        pending_mem = deps->pending_write_mems;
        while (pending)
          {
            if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
                                 t, rtx_varies_p))
              add_dependence (insn, XEXP (pending, 0), 0);

            pending = XEXP (pending, 1);
            pending_mem = XEXP (pending_mem, 1);
          }

        for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
          if (GET_CODE (XEXP (u, 0)) != JUMP_INSN
              || deps_may_trap_p (x))
            add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        add_insn_mem_dependence (deps, &deps->pending_read_insns,
                                 &deps->pending_read_mems, insn, x);

        /* Take advantage of tail recursion here.  */
        sched_analyze_2 (deps, XEXP (x, 0), insn);
        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
          reg_pending_barrier = true;

        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}

/* Analyze an INSN with pattern X to find all dependencies.  */

static void
sched_analyze_insn (deps, x, insn, loop_notes)
     struct deps *deps;
     rtx x, insn;
     rtx loop_notes;
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  int i;

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, REG_DEP_OUTPUT);
    }
  else if (code == PARALLEL)
    {
      int i;
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by called function.  */
  if (GET_CODE (insn) == CALL_INSN)
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = true;
    }

  if (GET_CODE (insn) == JUMP_INSN)
    {
      rtx next;
      next = next_nonnote_insn (insn);
      if (next && GET_CODE (next) == BARRIER)
        reg_pending_barrier = true;
      else
        {
          rtx pending, pending_mem;
          regset_head tmp;
          INIT_REG_SET (&tmp);

          (*current_sched_info->compute_jump_reg_dependencies) (insn, &tmp);
          /* Make latency of jump equal to 0 by using anti-dependence.  */
          EXECUTE_IF_SET_IN_REG_SET (&tmp, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            });
          CLEAR_REG_SET (&tmp);

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0)))
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
              pending = XEXP (pending, 1);
              pending_mem = XEXP (pending_mem, 1);
            }

          add_dependence_list (insn, deps->last_pending_memory_flush,
                               REG_DEP_ANTI);
        }
    }

  /* If there is a {LOOP,EHREGION}_{BEG,END} note in the middle of a basic
     block, then we must be sure that no instructions are scheduled across it.
     Otherwise, the reg_n_refs info (which depends on loop_depth) would
     become incorrect.  */
  if (loop_notes)
    {
      /* Update loop_notes with any notes from this insn.  Also determine
         if any of the notes on the list correspond to instruction scheduling
         barriers (loop, eh & setjmp notes, but not range notes).  */
      link = loop_notes;
      while (XEXP (link, 1))
        {
          if (INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_LOOP_END
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_BEG
              || INTVAL (XEXP (link, 0)) == NOTE_INSN_EH_REGION_END)
            reg_pending_barrier = true;

          link = XEXP (link, 1);
        }
      XEXP (link, 1) = REG_NOTES (insn);
      REG_NOTES (insn) = loop_notes;
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  */
  if (can_throw_internal (insn))
    reg_pending_barrier = true;

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of barrier the most added dependencies are not
         real, so we use anti-dependence here.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->sets, REG_DEP_ANTI);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_ANTI);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_ANTI);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_ANTI);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      for (i = 0; i < deps->max_reg; i++)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
        }

      flush_pending_lists (deps, insn, true, true);
      reg_pending_barrier = false;
    }
  else
    {
      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (GET_CODE (PATTERN (insn)) == COND_EXEC)
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
              reg_last->clobbers_length++;
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->clobbers, REG_DEP_OUTPUT);
              add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            });
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0);
              add_dependence_list (insn, reg_last->clobbers, 0);
              reg_last->uses_length++;
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
                {
                  add_dependence_list_and_free (insn, &reg_last->sets,
                                                REG_DEP_OUTPUT);
                  add_dependence_list_and_free (insn, &reg_last->uses,
                                                REG_DEP_ANTI);
                  add_dependence_list_and_free (insn, &reg_last->clobbers,
                                                REG_DEP_OUTPUT);
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
                  reg_last->clobbers_length = 0;
                  reg_last->uses_length = 0;
                }
              else
                {
                  add_dependence_list (insn, reg_last->sets, REG_DEP_OUTPUT);
                  add_dependence_list (insn, reg_last->uses, REG_DEP_ANTI);
                }
              reg_last->clobbers_length++;
              reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
            });
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i,
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (insn, &reg_last->sets,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->clobbers,
                                            REG_DEP_OUTPUT);
              add_dependence_list_and_free (insn, &reg_last->uses,
                                            REG_DEP_ANTI);
              reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
              reg_last->uses_length = 0;
              reg_last->clobbers_length = 0;
            });
        }

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and that it can not
     be moved into a different basic block.  */
  if (deps->libcall_block_tail_insn)
    {
      set_sched_group_p (insn);
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */
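  /* (The group is extended only while each insn remains such a simple
     hard-reg/pseudo copy; the first insn that is not such a copy reaches
     end_call_group below and closes the group.)  */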

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (GET_CODE (tmp) == REG)
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          set_sched_group_p (insn);
          CANT_MOVE (insn) = 1;
        }
      else
        {
        end_call_group:
          deps->in_post_call_group_p = false;
        }
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (deps, head, tail)
     struct deps *deps;
     rtx head, tail;
{
  rtx insn;
  rtx loop_notes = 0;

  if (current_sched_info->use_cselib)
    cselib_init ();

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (GET_CODE (insn) == INSN || GET_CODE (insn) == JUMP_INSN)
        {
          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          /* Make each JUMP_INSN a scheduling barrier for memory
             references.  */
          if (GET_CODE (insn) == JUMP_INSN)
            {
              /* Keep the list a reasonable size.  */
              if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
                flush_pending_lists (deps, insn, true, true);
              else
                deps->last_pending_memory_flush
                  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
            }
          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;
        }
      else if (GET_CODE (insn) == CALL_INSN)
        {
          int i;

          CANT_MOVE (insn) = 1;

          /* Clear out the stale LOG_LINKS from flow.  */
          free_INSN_LIST_list (&LOG_LINKS (insn));

          if (find_reg_note (insn, REG_SETJMP, NULL))
            {
              /* This is setjmp.  Assume that all registers, not just
                 hard registers, may be clobbered by this call.  */
              reg_pending_barrier = true;
            }
          else
            {
              for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
                /* A call may read and modify global register variables.  */
                if (global_regs[i])
                  {
                    SET_REGNO_REG_SET (reg_pending_sets, i);
                    SET_REGNO_REG_SET (reg_pending_uses, i);
                  }
                /* Other call-clobbered hard regs may be clobbered.
                   Since we only have a choice between 'might be clobbered'
                   and 'definitely not clobbered', we must include all
                   partly call-clobbered registers here.  */
                else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                         || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
                  SET_REGNO_REG_SET (reg_pending_clobbers, i);
                /* We don't know what set of fixed registers might be used
                   by the function, but it is certain that the stack pointer
                   is among them, so be conservative.  */
                else if (fixed_regs[i])
                  SET_REGNO_REG_SET (reg_pending_uses, i);
                /* The frame pointer is normally not used by the function
                   itself, but by the debugger.  */
                /* ??? MIPS o32 is an exception.  It uses the frame pointer
                   in the macro expansion of jal but does not represent this
                   fact in the call_insn rtl.  */
                else if (i == FRAME_POINTER_REGNUM
                         || (i == HARD_FRAME_POINTER_REGNUM
                             && (! reload_completed || frame_pointer_needed)))
                  SET_REGNO_REG_SET (reg_pending_uses, i);
            }

          /* For each insn which shouldn't cross a call, add a dependence
             between that insn and this call insn.  */
          add_dependence_list_and_free (insn, &deps->sched_before_next_call,
                                        REG_DEP_ANTI);

          sched_analyze_insn (deps, PATTERN (insn), insn, loop_notes);
          loop_notes = 0;

          /* In the absence of interprocedural alias analysis, we must flush
             all pending reads and writes, and start new dependencies starting
             from here.  But only flush writes for constant calls (which may
             be passed a pointer to something we haven't written yet).  */
          flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = true;
        }

      /* See comments on reemit_notes as to why we do this.
         ??? Actually, the reemit_notes just say what is done, not why.  */

      if (GET_CODE (insn) == NOTE
          && (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END))
        {
          rtx rtx_region;

          if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_BEG
              || NOTE_LINE_NUMBER (insn) == NOTE_INSN_EH_REGION_END)
            rtx_region = GEN_INT (NOTE_EH_HANDLER (insn));
          else
            rtx_region = GEN_INT (0);

          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        rtx_region,
                                        loop_notes);
          loop_notes = alloc_EXPR_LIST (REG_SAVE_NOTE,
                                        GEN_INT (NOTE_LINE_NUMBER (insn)),
                                        loop_notes);
          CONST_OR_PURE_CALL_P (loop_notes) = CONST_OR_PURE_CALL_P (insn);
        }

      if (current_sched_info->use_cselib)
        cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
         a CLOBBER beginning a libcall block.  If it is, record the
         end of the libcall sequence.

         We want to schedule libcall blocks as a unit before reload.  While
         this restricts scheduling, it preserves the meaning of a libcall
         block.

         As a side effect, we may get better code due to decreased register
         pressure as well as less chance of a foreign insn appearing in
         a libcall block.  */
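      /* The shape this test accepts is roughly:

             (clobber r0)        ;; carries a REG_LIBCALL note pointing at
             ...                 ;; the final insn of the sequence
             (set r0 r0)         ;; no-op copy carrying REG_RETVAL and
                                 ;; REG_EQUAL notes

         so libcall_block_tail_insn ends up pointing at that final copy.  */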
      if (!reload_completed
          /* Note we may have nested libcall sequences.  We only care about
             the outermost libcall sequence.  */
          && deps->libcall_block_tail_insn == 0
          /* The sequence must start with a clobber of a register.  */
          && GET_CODE (insn) == INSN
          && GET_CODE (PATTERN (insn)) == CLOBBER
          && (r0 = XEXP (PATTERN (insn), 0), GET_CODE (r0) == REG)
          && GET_CODE (XEXP (PATTERN (insn), 0)) == REG
          /* The CLOBBER must also have a REG_LIBCALL note attached.  */
          && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
          && (end_seq = XEXP (link, 0)) != 0
          /* The insn referenced by the REG_LIBCALL note must be a
             simple nop copy with the same destination as the register
             mentioned in the clobber.  */
          && (set = single_set (end_seq)) != 0
          && SET_DEST (set) == r0 && SET_SRC (set) == r0
          /* And finally the insn referenced by the REG_LIBCALL must
             also contain a REG_EQUAL note and a REG_RETVAL note.  */
          && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
          && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
        deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
         block.  */
      if (deps->libcall_block_tail_insn == insn)
        deps->libcall_block_tail_insn = 0;

      if (insn == tail)
        {
          if (current_sched_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  abort ();
}

/* The following function adds forward dependence (FROM, TO) with
   given DEP_TYPE.  The forward dependence must not already exist.  */

static void
add_forward_dependence (from, to, dep_type)
     rtx from;
     rtx to;
     enum reg_note dep_type;
{
  rtx new_link;

#ifdef ENABLE_CHECKING
  /* If add_dependence is working properly there should never
     be notes, deleted insns or duplicates in the backward
     links.  Thus we need not check for them here.

     However, if we have enabled checking we might as well go
     ahead and verify that add_dependence worked properly.  */
  if (GET_CODE (from) == NOTE
      || INSN_DELETED_P (from)
      || (forward_dependency_cache != NULL
          && TEST_BIT (forward_dependency_cache[INSN_LUID (from)],
                       INSN_LUID (to)))
      || (forward_dependency_cache == NULL
          && find_insn_list (to, INSN_DEPEND (from))))
    abort ();
  if (forward_dependency_cache != NULL)
    SET_BIT (forward_dependency_cache[INSN_LUID (from)],
             INSN_LUID (to));
#endif

  new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));

  PUT_REG_NOTE_KIND (new_link, dep_type);

  INSN_DEPEND (from) = new_link;
  INSN_DEP_COUNT (to) += 1;
}

/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (head, tail)
     rtx head, tail;
{
  rtx insn, link;
  rtx next_tail;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      if (! INSN_P (insn))
        continue;

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
        add_forward_dependence (XEXP (link, 0), insn, REG_NOTE_KIND (link));
    }
}

/* Initialize variables for region data dependence analysis.
   n_bbs is the number of region blocks.  */

void
init_deps (deps)
     struct deps *deps;
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = (struct deps_reg *)
    xcalloc (max_reg, sizeof (struct deps_reg));
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = false;
  deps->libcall_block_tail_insn = 0;
}

/* Free insn lists found in DEPS.  */

void
free_deps (deps)
     struct deps *deps;
{
  int i;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a test case with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i,
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    });
  CLEAR_REG_SET (&deps->reg_last_in_use);

  free (deps->reg_last);
}

/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled,
   it is used in the estimate of profitability.  */

void
init_dependency_caches (luid)
     int luid;
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      true_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (true_dependency_cache, luid);
      anti_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (anti_dependency_cache, luid);
      output_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (output_dependency_cache, luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = sbitmap_vector_alloc (luid, luid);
      sbitmap_vector_zero (forward_dependency_cache, luid);
#endif
    }
}

/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches ()
{
  if (true_dependency_cache)
    {
      sbitmap_vector_free (true_dependency_cache);
      true_dependency_cache = NULL;
      sbitmap_vector_free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      sbitmap_vector_free (output_dependency_cache);
      output_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      sbitmap_vector_free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
    }
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global ()
{
  reg_pending_sets = INITIALIZE_REG_SET (reg_pending_sets_head);
  reg_pending_clobbers = INITIALIZE_REG_SET (reg_pending_clobbers_head);
  reg_pending_uses = INITIALIZE_REG_SET (reg_pending_uses_head);
  reg_pending_barrier = false;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global ()
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}